var/home/core/zuul-output/logs/kubelet.log
Nov 22 10:39:38 crc systemd[1]: Starting Kubernetes Kubelet...
Nov 22 10:39:38 crc restorecon[4689]: Relabeled /var/lib/kubelet/config.json from system_u:object_r:unlabeled_t:s0 to system_u:object_r:container_var_lib_t:s0
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/device-plugins not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/device-plugins/kubelet.sock not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/volumes/kubernetes.io~configmap/nginx-conf/..2025_02_23_05_40_35.4114275528/nginx.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/containers/networking-console-plugin/22e96971 not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/containers/networking-console-plugin/21c98286 not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/containers/networking-console-plugin/0f1869e1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c215,c682
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/setup/46889d52 not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/setup/5b6a5969 not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c963
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/setup/6c7921f5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c215,c682
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/4804f443 not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/2a46b283 not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/a6b5573e not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/4f88ee5b not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/5a4eee4b not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c963
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/cd87c521 not reset as customized by admin to system_u:object_r:container_file_t:s0:c215,c682
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_33_42.2574241751 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_33_42.2574241751/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/38602af4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/1483b002 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/0346718b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/d3ed4ada not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/3bb473a5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/8cd075a9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/00ab4760 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/54a21c09 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c589,c726
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/70478888 not reset as customized by admin to system_u:object_r:container_file_t:s0:c176,c499
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/43802770 not reset as customized by admin to system_u:object_r:container_file_t:s0:c176,c499
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/955a0edc not reset as customized by admin to system_u:object_r:container_file_t:s0:c176,c499
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/bca2d009 not reset as customized by admin to system_u:object_r:container_file_t:s0:c140,c1009
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/b295f9bd not reset as customized by admin to system_u:object_r:container_file_t:s0:c589,c726
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/..2025_02_23_05_21_22.3617465230 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/..2025_02_23_05_21_22.3617465230/cnibincopy.sh not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/cnibincopy.sh not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/..2025_02_23_05_21_22.2050650026 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/..2025_02_23_05_21_22.2050650026/allowlist.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/allowlist.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/egress-router-binary-copy/bc46ea27 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/egress-router-binary-copy/5731fc1b not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/egress-router-binary-copy/5e1b2a3c not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/cni-plugins/943f0936 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/cni-plugins/3f764ee4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/cni-plugins/8695e3f9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/bond-cni-plugin/aed7aa86 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/bond-cni-plugin/c64d7448 not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/bond-cni-plugin/0ba16bd2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/routeoverride-cni/207a939f not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/routeoverride-cni/54aa8cdb not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/routeoverride-cni/1f5fa595 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni-bincopy/bf9c8153 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni-bincopy/47fba4ea not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni-bincopy/7ae55ce9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni/7906a268 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni/ce43fa69 not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni/7fc7ea3a not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/kube-multus-additional-cni-plugins/d8c38b7d not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/kube-multus-additional-cni-plugins/9ef015fb not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/kube-multus-additional-cni-plugins/b9db6a41 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c432,c991
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/network-metrics-daemon/b1733d79 not reset as customized by admin to system_u:object_r:container_file_t:s0:c476,c820
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/network-metrics-daemon/afccd338 not reset as customized by admin to system_u:object_r:container_file_t:s0:c272,c818
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/network-metrics-daemon/9df0a185 not reset as customized by admin to system_u:object_r:container_file_t:s0:c432,c991
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/kube-rbac-proxy/18938cf8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c476,c820
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/kube-rbac-proxy/7ab4eb23 not reset as customized by admin to system_u:object_r:container_file_t:s0:c272,c818
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/kube-rbac-proxy/56930be6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c432,c991
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/env-overrides not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/env-overrides/..2025_02_23_05_21_35.630010865 not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/env-overrides/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/..2025_02_23_05_21_35.1088506337 not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/..2025_02_23_05_21_35.1088506337/ovnkube.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/ovnkube.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/kube-rbac-proxy/0d8e3722 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/kube-rbac-proxy/d22b2e76 not reset as customized by admin to system_u:object_r:container_file_t:s0:c382,c850
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/kube-rbac-proxy/e036759f not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/2734c483 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/57878fe7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/3f3c2e58 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/375bec3e not reset as customized by admin to system_u:object_r:container_file_t:s0:c382,c850
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/7bc41e08 not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/containers/download-server/48c7a72d not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/containers/download-server/4b66701f not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/containers/download-server/a5a1c202 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..2025_02_23_05_21_40.3350632666 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..2025_02_23_05_21_40.3350632666/additional-cert-acceptance-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..2025_02_23_05_21_40.3350632666/additional-pod-admission-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/additional-cert-acceptance-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/additional-pod-admission-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/env-overrides not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/env-overrides/..2025_02_23_05_21_40.1388695756 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/env-overrides/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/webhook/26f3df5b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/webhook/6d8fb21d not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/webhook/50e94777 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/208473b3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/ec9e08ba not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/3b787c39 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/208eaed5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/93aa3a2b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/3c697968 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/containers/network-check-target-container/ba950ec9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/containers/network-check-target-container/cb5cdb37 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/containers/network-check-target-container/f2df9827 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/..2025_02_23_05_22_30.473230615 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/..2025_02_23_05_22_30.473230615/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_24_06_22_02.1904938450 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_24_06_22_02.1904938450/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/machine-config-operator/fedaa673 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/machine-config-operator/9ca2df95 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/machine-config-operator/b2d7460e not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/kube-rbac-proxy/2207853c not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/kube-rbac-proxy/241c1c29 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/kube-rbac-proxy/2d910eaf not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/..2025_02_23_05_23_49.3726007728 not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/..2025_02_23_05_23_49.3726007728/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/..2025_02_23_05_23_49.841175008 not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/..2025_02_23_05_23_49.841175008/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.843437178 not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.843437178/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/c6c0f2e7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/399edc97 not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/8049f7cc not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/0cec5484 not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/312446d0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c406,c828
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/8e56a35d not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.133159589 not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.133159589/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/2d30ddb9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c380,c909
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/eca8053d not reset as customized by admin to system_u:object_r:container_file_t:s0:c380,c909
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/c3a25c9a not reset as customized by admin to system_u:object_r:container_file_t:s0:c168,c522
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/b9609c22 not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c968,c969
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/dns-operator/e8b0eca9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c106,c418
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/dns-operator/b36a9c3f not reset as customized by admin to system_u:object_r:container_file_t:s0:c529,c711
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/dns-operator/38af7b07 not reset as customized by admin to system_u:object_r:container_file_t:s0:c968,c969
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/kube-rbac-proxy/ae821620 not reset as customized by admin to system_u:object_r:container_file_t:s0:c106,c418
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/kube-rbac-proxy/baa23338 not reset as customized by admin to system_u:object_r:container_file_t:s0:c529,c711
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/kube-rbac-proxy/2c534809 not reset as customized by admin to system_u:object_r:container_file_t:s0:c968,c969
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3532625537 not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3532625537/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/59b29eae not reset as customized by admin to system_u:object_r:container_file_t:s0:c338,c381
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/c91a8e4f not reset as customized by admin to system_u:object_r:container_file_t:s0:c338,c381
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/4d87494a not reset as customized by admin to system_u:object_r:container_file_t:s0:c442,c857
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/1e33ca63 not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/kube-rbac-proxy/8dea7be2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/kube-rbac-proxy/d0b04a99 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/kube-rbac-proxy/d84f01e7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/package-server-manager/4109059b not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/package-server-manager/a7258a3e not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/package-server-manager/05bdf2b6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/f3261b51 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/315d045e not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/5fdcf278 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/d053f757 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/c2850dc7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/..2025_02_23_05_22_30.2390596521 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/..2025_02_23_05_22_30.2390596521/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/fcfb0b2b not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/c7ac9b7d not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/fa0c0d52 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/c609b6ba not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/2be6c296 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/89a32653 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/4eb9afeb not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/13af6efa not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/containers/olm-operator/b03f9724 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/containers/olm-operator/e3d105cc not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/containers/olm-operator/3aed4d83 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1906041176 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1906041176/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/0765fa6e not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/2cefc627 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/3dcc6345 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/365af391 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-SelfManagedHA-Default.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-SelfManagedHA-TechPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-SelfManagedHA-DevPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-Hypershift-TechPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-Hypershift-DevPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-Hypershift-Default.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-api/b1130c0f not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-api/236a5913 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-api/b9432e26 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/5ddb0e3f not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/986dc4fd not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/8a23ff9a not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/9728ae68 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/665f31d0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1255385357 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1255385357/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_23_57.573792656 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_23_57.573792656/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_23_05_22_30.3254245399 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_23_05_22_30.3254245399/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/136c9b42 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/98a1575b not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/cac69136 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/5deb77a7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/2ae53400 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3608339744 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3608339744/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/e46f2326 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/dc688d3c not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/3497c3cd not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/177eb008 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3819292994 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3819292994/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/af5a2afa not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/d780cb1f not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/49b0f374 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/26fbb125 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.3244779536 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.3244779536/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Nov 22 10:39:38 crc restorecon[4689]:
/var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/cf14125a not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/b7f86972 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/e51d739c not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/88ba6a69 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/669a9acf not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/5cd51231 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/75349ec7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/15c26839 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/45023dcd not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/2bb66a50 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/kube-rbac-proxy/64d03bdd not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/kube-rbac-proxy/ab8e7ca0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/kube-rbac-proxy/bb9be25f not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.2034221258 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.2034221258/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:39:38 crc restorecon[4689]: 
/var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/containers/cluster-image-registry-operator/9a0b61d3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/containers/cluster-image-registry-operator/d471b9d2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/containers/cluster-image-registry-operator/8cb76b8e not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/containers/catalog-operator/11a00840 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/containers/catalog-operator/ec355a92 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/containers/catalog-operator/992f735e not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1782968797 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1782968797/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 22 10:39:38 crc 
restorecon[4689]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/d59cdbbc not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/72133ff0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/c56c834c not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/d13724c7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/0a498258 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/containers/machine-config-server/fa471982 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/containers/machine-config-server/fc900d92 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/containers/machine-config-server/fa7d68da not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/migrator/4bacf9b4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/migrator/424021b1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/migrator/fc2e31a3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/graceful-termination/f51eefac not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/graceful-termination/c8997f2f not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/graceful-termination/7481f599 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 22 10:39:38 crc restorecon[4689]: 
/var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/..2025_02_23_05_22_49.2255460704 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/..2025_02_23_05_22_49.2255460704/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/fdafea19 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/d0e1c571 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/ee398915 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/682bb6b8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/setup/a3e67855 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/setup/a989f289 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/setup/915431bd not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-ensure-env-vars/7796fdab not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-ensure-env-vars/dcdb5f19 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 22 10:39:38 crc restorecon[4689]: 
/var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-ensure-env-vars/a3aaa88c not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-resources-copy/5508e3e6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-resources-copy/160585de not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-resources-copy/e99f8da3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcdctl/8bc85570 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcdctl/a5861c91 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcdctl/84db1135 not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd/9e1a6043 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd/c1aba1c2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd/d55ccd6d not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-metrics/971cc9f6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-metrics/8f2e3dcf not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-metrics/ceb35e9c not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-readyz/1c192745 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-readyz/5209e501 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-readyz/f83de4df not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-rev/e7b978ac not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 22 10:39:38 crc 
restorecon[4689]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-rev/c64304a1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-rev/5384386b not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c268,c620 Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/multus-admission-controller/cce3e3ff not reset as customized by admin to system_u:object_r:container_file_t:s0:c435,c756 Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/multus-admission-controller/8fb75465 not reset as customized by admin to system_u:object_r:container_file_t:s0:c268,c620 Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/kube-rbac-proxy/740f573e not reset as customized by admin to system_u:object_r:container_file_t:s0:c435,c756 Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/kube-rbac-proxy/32fd1134 not reset as customized by admin to system_u:object_r:container_file_t:s0:c268,c620 Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/containers/serve-healthcheck-canary/0a861bd3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/containers/serve-healthcheck-canary/80363026 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/containers/serve-healthcheck-canary/bfa952a8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_23_05_33_31.2122464563 not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_23_05_33_31.2122464563/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/config-file.yaml not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c129,c158 Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/config/..2025_02_23_05_33_31.333075221 not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/kube-rbac-proxy/793bf43d not reset as customized by admin to system_u:object_r:container_file_t:s0:c381,c387 Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/kube-rbac-proxy/7db1bb6e not reset as customized by admin to system_u:object_r:container_file_t:s0:c142,c438 Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/kube-rbac-proxy/4f6a0368 not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/c12c7d86 not reset as customized by admin to system_u:object_r:container_file_t:s0:c381,c387 Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/36c4a773 not reset as customized by admin to system_u:object_r:container_file_t:s0:c142,c438 Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/4c1e98ae not reset as customized by admin to system_u:object_r:container_file_t:s0:c142,c438 Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/a4c8115c not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/setup/7db1802e not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver/a008a7ab not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-cert-syncer/2c836bac not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-cert-regeneration-controller/0ce62299 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c97,c980 Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-insecure-readyz/945d2457 not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-check-endpoints/7d5c1dd8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/advanced-cluster-management not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/advanced-cluster-management/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-broker-rhel8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-broker-rhel8/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-online not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-online/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:38 crc restorecon[4689]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams-console not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams-console/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq7-interconnect-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq7-interconnect-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-automation-platform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-automation-platform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-cloud-addons-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-cloud-addons-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry-3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:38 crc restorecon[4689]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry-3/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-load-balancer-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-load-balancer-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-businessautomation-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-businessautomation-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-kogito-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-kogito-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator/index.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/businessautomation-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/businessautomation-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:38 crc restorecon[4689]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cephcsi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cephcsi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cincinnati-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cincinnati-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-kube-descheduler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-kube-descheduler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-logging not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-logging/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-observability-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-observability-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/compliance-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/compliance-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/container-security-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:38 crc restorecon[4689]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/container-security-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/costmanagement-metrics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/costmanagement-metrics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cryostat-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cryostat-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datagrid not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datagrid/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devspaces not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devspaces/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devworkspace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devworkspace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dpu-network-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dpu-network-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eap not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eap/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-dns-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-dns-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/file-integrity-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/file-integrity-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-apicurito not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-apicurito/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-console not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-console/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:38 crc restorecon[4689]: 
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-online not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-online/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gatekeeper-operator-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gatekeeper-operator-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jws-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jws-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management-hub not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management-hub/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kiali-ossm not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kiali-ossm/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubevirt-hyperconverged not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubevirt-hyperconverged/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logic-operator-rhel8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logic-operator-rhel8/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lvms-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lvms-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mcg-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mcg-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mta-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mta-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtr-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtr-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-engine not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-engine/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-observability-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-observability-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-client-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-client-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-csi-addons-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-csi-addons-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-multicluster-orchestrator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-multicluster-orchestrator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-prometheus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-prometheus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-cluster-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-cluster-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-hub-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-hub-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator/bundle-v1.15.0.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator/channel.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator/package.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-custom-metrics-autoscaler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-custom-metrics-autoscaler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-gitops-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-gitops-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-pipelines-operator-rh not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-pipelines-operator-rh/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-secondary-scheduler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-secondary-scheduler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-bridge-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-bridge-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/recipe not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/recipe/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-camel-k not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-camel-k/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-hawtio-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-hawtio-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redhat-oadp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redhat-oadp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rh-service-binding-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rh-service-binding-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhacs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhacs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhbk-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhbk-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhdh not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhdh/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-prometheus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-prometheus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhpam-kogito-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhpam-kogito-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhsso-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhsso-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rook-ceph-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rook-ceph-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/run-once-duration-override-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/run-once-duration-override-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sandboxed-containers-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sandboxed-containers-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/security-profiles-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/security-profiles-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/serverless-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/serverless-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-registry-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-registry-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator3/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/submariner not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/submariner/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tang-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tang-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustee-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustee-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volsync-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volsync-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/web-terminal not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/web-terminal/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-utilities/bc8d0691 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-utilities/6b76097a not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-utilities/34d1af30 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-content/312ba61c not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-content/645d5dd1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-content/16e825f0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/registry-server/4cf51fc9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/registry-server/2a23d348 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/registry-server/075dbd49 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/containers/node-ca/dd585ddd not reset as customized by admin to system_u:object_r:container_file_t:s0:c377,c642
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/containers/node-ca/17ebd0ab not reset as customized by admin to system_u:object_r:container_file_t:s0:c338,c343
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/containers/node-ca/005579f4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_23_05_23_11.449897510 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_23_05_23_11.449897510/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_23_05_23_11.1287037894 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/..2025_02_23_05_23_11.1301053334 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/..2025_02_23_05_23_11.1301053334/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/fix-audit-permissions/bf5f3b9c not reset as customized by admin to system_u:object_r:container_file_t:s0:c49,c263
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/fix-audit-permissions/af276eb7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c701
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/fix-audit-permissions/ea28e322 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/oauth-apiserver/692e6683 not reset as customized by admin to system_u:object_r:container_file_t:s0:c49,c263
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/oauth-apiserver/871746a7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c701
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/oauth-apiserver/4eb2e958 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/..2025_02_24_06_09_06.2875086261 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/..2025_02_24_06_09_06.2875086261/console-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/console-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_09_06.286118152 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_09_06.286118152/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/..2025_02_24_06_09_06.3865795478 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/..2025_02_24_06_09_06.3865795478/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/..2025_02_24_06_09_06.584414814 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/..2025_02_24_06_09_06.584414814/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/containers/console/ca9b62da not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/containers/console/0edd6fce not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837 not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/openshift-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/openshift-controller-manager.openshift-global-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/openshift-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/openshift-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/openshift-controller-manager.openshift-global-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/openshift-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.1071801880 not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.1071801880/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/..2025_02_24_06_20_07.2494444877 not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/..2025_02_24_06_20_07.2494444877/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/containers/controller-manager/89b4555f not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/..2025_02_23_05_23_22.4071100442 not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/..2025_02_23_05_23_22.4071100442/Corefile not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/Corefile not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/dns/655fcd71 not reset as customized by admin to system_u:object_r:container_file_t:s0:c457,c841
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/dns/0d43c002 not reset as customized by admin to system_u:object_r:container_file_t:s0:c55,c1022
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/dns/e68efd17 not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/kube-rbac-proxy/9acf9b65 not reset as customized by admin to system_u:object_r:container_file_t:s0:c457,c841
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/kube-rbac-proxy/5ae3ff11 not reset as customized by admin to system_u:object_r:container_file_t:s0:c55,c1022
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/kube-rbac-proxy/1e59206a not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/containers/dns-node-resolver/27af16d1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c304,c1017
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/containers/dns-node-resolver/7918e729 not reset as customized by admin to system_u:object_r:container_file_t:s0:c853,c893
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/containers/dns-node-resolver/5d976d0e not reset as customized by admin to system_u:object_r:container_file_t:s0:c585,c981
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/..2025_02_23_05_38_56.1112187283 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/..2025_02_23_05_38_56.1112187283/controller-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 22 10:39:38 crc restorecon[4689]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/controller-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_38_56.2839772658 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_38_56.2839772658/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/d7f55cbb not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/f0812073 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/1a56cbeb not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/7fdd437e not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/cdfb5652 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_24_06_17_29.3844392896 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_24_06_17_29.3844392896/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/..2025_02_24_06_17_29.848549803 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/..2025_02_24_06_17_29.848549803/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/..2025_02_24_06_17_29.780046231 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/..2025_02_24_06_17_29.780046231/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347/image-registry.openshift-image-registry.svc..5000 not reset
as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_17_29.2729721485 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_17_29.2729721485/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/containers/fix-audit-permissions/fb93119e not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 22 10:39:39 crc restorecon[4689]: 
/var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/containers/openshift-apiserver/f1e8fc0e not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/containers/openshift-apiserver-check-endpoints/218511f3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes/kubernetes.io~empty-dir/tmpfs not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes/kubernetes.io~empty-dir/tmpfs/k8s-webhook-server not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes/kubernetes.io~empty-dir/tmpfs/k8s-webhook-server/serving-certs not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/containers/packageserver/ca8af7b3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/containers/packageserver/72cc8a75 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/containers/packageserver/6e8a3760 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/..2025_02_23_05_27_30.557428972 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/..2025_02_23_05_27_30.557428972/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/4c3455c0 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c5,c6 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/2278acb0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/4b453e4f not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/3ec09bda not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_24_06_25_03.422633132 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_24_06_25_03.422633132/anchors not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_24_06_25_03.422633132/anchors/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/anchors not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:39:39 crc restorecon[4689]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/edk2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/edk2/cacerts.bin not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/java not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/java/cacerts not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/openssl not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/openssl/ca-bundle.trust.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/tls-ca-bundle.pem not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/email-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/objsign-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2ae6433e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fde84897.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/75680d2e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/openshift-service-serving-signer_1740288168.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/facfc4fa.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8f5a969c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CFCA_EV_ROOT.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9ef4a08a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ingress-operator_1740288202.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2f332aed.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:39:39 crc restorecon[4689]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/248c8271.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d10a21f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ACCVRAIZ1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a94d09e5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3c9a4d3b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/40193066.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AC_RAIZ_FNMT-RCM.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cd8c0d63.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b936d1c6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CA_Disig_Root_R2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4fd49c6c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AC_RAIZ_FNMT-RCM_SERVIDORES_SEGUROS.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b81b93f0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:39:39 crc restorecon[4689]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f9a69fa.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certigna.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b30d5fda.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ANF_Secure_Server_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b433981b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/93851c9e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9282e51c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e7dd1bc4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Actalis_Authentication_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/930ac5d2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f47b495.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e113c810.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5931b5bc.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:39:39 crc restorecon[4689]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Commercial.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2b349938.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e48193cf.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/302904dd.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a716d4ed.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Networking.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/93bc0acc.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/86212b19.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certigna_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Premium.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b727005e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dbc54cab.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f51bb24c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:39:39 crc restorecon[4689]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c28a8a30.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Premium_ECC.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9c8dfbd4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ccc52f49.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cb1c3204.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ce5e74ef.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fd08c599.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_Trusted_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6d41d539.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fb5fa911.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e35234b1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:39:39 crc restorecon[4689]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8cb5ee0f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a7c655d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f8fc53da.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/de6d66f3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d41b5e2a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/41a3f684.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1df5a75f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Atos_TrustedRoot_2011.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e36a6752.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b872f2b4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9576d26b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/228f89db.0 
not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Atos_TrustedRoot_Root_CA_ECC_TLS_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fb717492.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2d21b73c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0b1b94ef.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/595e996b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Atos_TrustedRoot_Root_CA_RSA_TLS_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9b46e03d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/128f4b91.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Buypass_Class_3_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/81f2d2b1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Autoridad_de_Certificacion_Firmaprofesional_CIF_A62634068.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3bde41ac.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d16a5865.0 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_EC-384_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/BJCA_Global_Root_CA1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0179095f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ffa7f1eb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9482e63a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d4dae3dd.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/BJCA_Global_Root_CA2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3e359ba6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7e067d03.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/95aff9e3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d7746a63.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Baltimore_CyberTrust_Root.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/653b494a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:39:39 crc restorecon[4689]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3ad48a91.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_Trusted_Network_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Buypass_Class_2_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/54657681.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/82223c44.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e8de2f56.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2d9dafe4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d96b65e2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ee64a828.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/COMODO_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/40547a79.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5a3f0ff8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a780d93.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:39:39 crc restorecon[4689]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/34d996fb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/COMODO_ECC_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/eed8c118.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/89c02a45.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certainly_Root_R1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b1159c4c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/COMODO_RSA_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d6325660.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d4c339cb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8312c4c1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certainly_Root_E1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8508e720.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5fdd185d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:39:39 crc restorecon[4689]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/48bec511.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/69105f4f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0b9bc432.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_Trusted_Network_CA_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/32888f65.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_ECC_Root-01.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6b03dec0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/219d9499.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_ECC_Root-02.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5acf816d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cbf06781.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_RSA_Root-01.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dc99f41e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_RSA_Root-02.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AAA_Certificate_Services.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/985c1f52.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8794b4e3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_BR_Root_CA_1_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e7c037b4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ef954a4e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_EV_Root_CA_1_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2add47b6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/90c5a3c8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_Root_Class_3_CA_2_2009.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b0f3e76e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/53a1b57a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_Root_Class_3_CA_2_EV_2009.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Assured_ID_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5ad8a5d6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/68dd7389.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Assured_ID_Root_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9d04f354.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d6437c3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/062cdee6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bd43e1dd.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Assured_ID_Root_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7f3d5d1d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c491639e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign_Root_E46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Global_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3513523f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/399e7759.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/feffd413.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d18e9066.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Global_Root_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/607986c7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c90bc37d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1b0f7e5c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e08bfd1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Global_Root_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dd8e9d41.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ed39abd0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a3418fda.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bc3f2570.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_High_Assurance_EV_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/244b5494.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/81b9768f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4be590e0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_TLS_ECC_P384_Root_G5.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9846683b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/252252d2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e8e7201.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ISRG_Root_X1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_TLS_RSA4096_Root_G5.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d52c538d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c44cc0c0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign_Root_R46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Trusted_Root_G4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/75d1b2ed.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a2c66da8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ecccd8db.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust.net_Certification_Authority__2048_.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/aee5f10d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3e7271e8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b0e59380.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4c3982f2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6b99d060.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bf64f35b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0a775a30.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/002c0b4f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cc450945.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority_-_EC1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/106f3e4d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b3fb433b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4042bcee.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/02265526.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/455f1b52.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0d69c7e1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9f727ac7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority_-_G4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5e98733a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f0cd152c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dc4d6a89.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6187b673.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/FIRMAPROFESIONAL_CA_ROOT-A_WEB.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ba8887ce.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/068570d1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f081611a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/48a195d8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GDCA_TrustAUTH_R5_ROOT.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0f6fa695.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ab59055e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b92fd57f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GLOBALTRUST_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fa5da96b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1ec40989.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7719f463.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1001acf7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f013ecaf.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/626dceaf.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c559d742.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1d3472b9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9479c8c3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a81e292b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4bfab552.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Go_Daddy_Class_2_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Sectigo_Public_Server_Authentication_Root_E46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Go_Daddy_Root_Certificate_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e071171e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/57bcb2da.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/HARICA_TLS_ECC_Root_CA_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ab5346f4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5046c355.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/HARICA_TLS_RSA_Root_CA_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/865fbdf9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/da0cfd1d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/85cde254.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Hellenic_Academic_and_Research_Institutions_ECC_RootCA_2015.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cbb3f32b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SecureSign_RootCA11.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Hellenic_Academic_and_Research_Institutions_RootCA_2015.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5860aaa6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/31188b5e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/HiPKI_Root_CA_-_G1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c7f1359b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f15c80c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Hongkong_Post_Root_CA_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/09789157.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ISRG_Root_X2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/18856ac4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e09d511.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/IdenTrust_Commercial_Root_CA_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cf701eeb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d06393bb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/IdenTrust_Public_Sector_Root_CA_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/10531352.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Izenpe.com.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SecureTrust_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b0ed035a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Microsec_e-Szigno_Root_CA_2009.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8160b96c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e8651083.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2c63f966.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Security_Communication_RootCA2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Microsoft_ECC_Root_Certificate_Authority_2017.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d89cda1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/01419da9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_TLS_RSA_Root_CA_2022.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b7a5b843.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Microsoft_RSA_Root_Certificate_Authority_2017.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bf53fb88.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9591a472.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3afde786.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SwissSign_Gold_CA_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/NAVER_Global_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3fb36b73.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d39b0a2c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a89d74c2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cd58d51e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b7db1890.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/NetLock_Arany__Class_Gold__F__tan__s__tv__ny.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/988a38cb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/60afe812.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f39fc864.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5443e9e3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/OISTE_WISeKey_Global_Root_GB_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e73d606e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dfc0fe80.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b66938e9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e1eab7c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/OISTE_WISeKey_Global_Root_GC_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/773e07ad.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3c899c73.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d59297b8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ddcda989.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_1_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/749e9e03.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/52b525c7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Security_Communication_RootCA3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d7e8dc79.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a819ef2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/08063a00.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6b483515.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_2_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/064e0aa9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1f58a078.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6f7454b3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7fa05551.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/76faf6c0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9339512a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f387163d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ee37c333.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_3_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e18bfb83.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e442e424.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fe8a2cd8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/23f4c490.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5cd81ad7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_EV_Root_Certification_Authority_ECC.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f0c70a8d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7892ad52.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SZAFIR_ROOT_CA2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4f316efb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_EV_Root_Certification_Authority_RSA_R2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/06dc52d5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/583d0756.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Sectigo_Public_Server_Authentication_Root_R46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_Root_Certification_Authority_ECC.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0bf05006.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/88950faa.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9046744a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3c860d51.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_Root_Certification_Authority_RSA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6fa5da56.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/33ee480d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Secure_Global_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/63a2c897.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_TLS_ECC_Root_CA_2022.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bdacca6f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ff34af3f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dbff3a01.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Security_Communication_ECC_RootCA1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_Root_CA_-_C1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Starfield_Class_2_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/406c9bb1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Starfield_Root_Certificate_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_ECC_Root_CA_-_C3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Starfield_Services_Root_Certificate_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SwissSign_Silver_CA_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/99e1b953.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/T-TeleSec_GlobalRoot_Class_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/vTrus_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/T-TeleSec_GlobalRoot_Class_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/14bc7599.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TUBITAK_Kamu_SM_SSL_Kok_Sertifikasi_-_Surum_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TWCA_Global_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a3adc42.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TWCA_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f459871d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Telekom_Security_TLS_ECC_Root_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_Root_CA_-_G1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Telekom_Security_TLS_RSA_Root_2023.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TeliaSonera_Root_CA_v1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Telia_Root_CA_v2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8f103249.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f058632f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ca-certificates.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TrustAsia_Global_Root_CA_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9bf03295.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/98aaf404.0 not reset as customized by admin to
system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TrustAsia_Global_Root_CA_G4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1cef98f5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/073bfcc5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2923b3f9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Trustwave_Global_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f249de83.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/edcbddb5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_ECC_Root_CA_-_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Trustwave_Global_ECC_P256_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9b5697b0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1ae85e5e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b74d2bd5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 
10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Trustwave_Global_ECC_P384_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d887a5bb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9aef356c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TunTrust_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fd64f3fc.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e13665f9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/UCA_Extended_Validation_Root.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0f5dc4f3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/da7377f6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/UCA_Global_G2_Root.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c01eb047.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/304d27c3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ed858448.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:39:39 crc restorecon[4689]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/USERTrust_ECC_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f30dd6ad.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/04f60c28.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/vTrus_ECC_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/USERTrust_RSA_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fc5a8f99.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/35105088.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ee532fd5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/XRamp_Global_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/706f604c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/76579174.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/certSIGN_ROOT_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d86cdd1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:39:39 crc restorecon[4689]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/882de061.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/certSIGN_ROOT_CA_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f618aec.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a9d40e02.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e-Szigno_Root_CA_2017.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e868b802.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/83e9984f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ePKI_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ca6e4ad9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9d6523ce.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4b718d9b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/869fbf79.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/containers/registry/f8d22bdb not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 10:39:39 crc 
restorecon[4689]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator/6e8bbfac not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator/54dd7996 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator/a4f1bb05 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator-watch/207129da not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator-watch/c1df39e1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator-watch/15b8f1cd not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3523263858 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3523263858/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/..2025_02_23_05_27_49.3256605594 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/..2025_02_23_05_27_49.3256605594/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 22 10:39:39 crc restorecon[4689]: 
/var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/kube-rbac-proxy/77bd6913 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/kube-rbac-proxy/2382c1b1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/kube-rbac-proxy/704ce128 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/machine-api-operator/70d16fe0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/machine-api-operator/bfb95535 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/machine-api-operator/57a8e8e2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3413793711 not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3413793711/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/containers/kube-apiserver-operator/1b9d3e5e not reset as customized by admin to system_u:object_r:container_file_t:s0:c107,c917 Nov 22 10:39:39 crc restorecon[4689]: 
/var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/containers/kube-apiserver-operator/fddb173c not reset as customized by admin to system_u:object_r:container_file_t:s0:c202,c983 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/containers/kube-apiserver-operator/95d3c6c4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/containers/check-endpoints/bfb5fff5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/containers/check-endpoints/2aef40aa not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/containers/check-endpoints/c0391cad not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager/1119e69d not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager/660608b4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager/8220bd53 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/cluster-policy-controller/85f99d5c not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/cluster-policy-controller/4b0225f6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-cert-syncer/9c2a3394 not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-cert-syncer/e820b243 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-recovery-controller/1ca52ea0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-recovery-controller/e6988e45 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Nov 22 10:39:39 crc restorecon[4689]: 
/var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/..2025_02_24_06_09_21.2517297950 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/..2025_02_24_06_09_21.2517297950/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/machine-config-controller/6655f00b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/machine-config-controller/98bc3986 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/machine-config-controller/08e3458a not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/kube-rbac-proxy/2a191cb0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/kube-rbac-proxy/6c4eeefb not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/kube-rbac-proxy/f61a549c not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/hostpath-provisioner/24891863 not reset as customized by admin to system_u:object_r:container_file_t:s0:c37,c572 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/hostpath-provisioner/fbdfd89c not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/liveness-probe/9b63b3bc not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c37,c572 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/liveness-probe/8acde6d6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/node-driver-registrar/59ecbba3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/csi-provisioner/685d4be3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300/openshift-route-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300/openshift-route-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/openshift-route-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/openshift-route-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.2950937851 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 22 10:39:39 crc restorecon[4689]: 
/var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.2950937851/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/containers/route-controller-manager/feaea55e not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abinitio-runtime-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abinitio-runtime-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/accuknox-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/accuknox-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aci-containers-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aci-containers-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airlock-microgateway not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airlock-microgateway/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ako-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ako-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloy not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloy/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anchore-engine not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anchore-engine/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 
10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-cloud-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-cloud-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-dcap-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-dcap-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cfm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cfm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium-enterprise not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium-enterprise/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloud-native-postgresql not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloud-native-postgresql/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudera-streams-messaging-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudera-streams-messaging-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudnative-pg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudnative-pg/catalog.json not reset as customized by admin 
to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cnfv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cnfv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/conjur-follower-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/conjur-follower-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/coroot-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/coroot-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cte-k8s-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cte-k8s-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-deploy-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-deploy-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-release-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-release-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edb-hcp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edb-hcp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: 
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-eck-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-eck-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/federatorai-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/federatorai-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fujitsu-enterprise-postgres-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fujitsu-enterprise-postgres-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/function-mesh not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/function-mesh/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/harness-gitops-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/harness-gitops-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hcp-terraform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hcp-terraform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hpe-ezmeral-csi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hpe-ezmeral-csi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-application-gateway-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-application-gateway-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-directory-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-directory-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-dr-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-dr-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-licensing-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-licensing-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-sds-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-sds-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infrastructure-asset-orchestrator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infrastructure-asset-orchestrator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-device-plugins-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-device-plugins-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-kubernetes-power-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-kubernetes-power-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-openshift-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-openshift-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8s-triliovault not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8s-triliovault/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-ati-updates not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-ati-updates/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-framework not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-framework/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-ingress not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-ingress/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-licensing not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-licensing/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-sso not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-sso/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-keycloak-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-keycloak-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-load-core not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-load-core/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-loadcore-agents not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-loadcore-agents/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nats-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nats-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nimbusmosaic-dusim not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nimbusmosaic-dusim/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-rest-api-browser-v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-rest-api-browser-v1/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-appsec not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-appsec/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-core not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-core/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-db/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-diagnostics not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-diagnostics/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-logging not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-logging/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-migration not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-migration/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-msg-broker not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-msg-broker/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-notifications not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-notifications/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-stats-dashboards not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-stats-dashboards/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-storage not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-storage/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-test-core not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-test-core/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-ui not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-ui/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-websocket-service not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-websocket-service/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kong-gateway-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kong-gateway-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubearmor-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubearmor-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lenovo-locd-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lenovo-locd-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memcached-operator-ogaye not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memcached-operator-ogaye/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memory-machine-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memory-machine-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-enterprise not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-enterprise/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netapp-spark-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netapp-spark-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-adm-agent-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-adm-agent-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-repository-ha-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-repository-ha-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nginx-ingress-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nginx-ingress-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nim-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nim-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxiq-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxiq-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxrm-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxrm-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odigos-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odigos-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/open-liberty-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/open-liberty-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftartifactoryha-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftartifactoryha-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftxray-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftxray-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/operator-certification-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/operator-certification-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pmem-csi-operator-os not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pmem-csi-operator-os/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-component-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-component-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-fabric-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-fabric-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sanstoragecsi-operator-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sanstoragecsi-operator-bundle/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/smilecdr-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/smilecdr-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sriov-fec not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sriov-fec/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-commons-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-commons-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-zookeeper-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-zookeeper-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-tsc-client-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-tsc-client-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tawon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tawon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tigera-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tigera-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-secrets-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-secrets-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vcp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vcp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/webotx-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/webotx-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-utilities/63709497 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-utilities/d966b7fd not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-utilities/f5773757 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-content/81c9edb9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-content/57bf57ee not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-content/86f5e6aa not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/registry-server/0aabe31d not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/registry-server/d2af85c2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/registry-server/09d157d9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acm-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acm-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acmpca-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acmpca-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigateway-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigateway-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigatewayv2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigatewayv2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-applicationautoscaling-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-applicationautoscaling-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-athena-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-athena-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudfront-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudfront-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudtrail-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudtrail-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatch-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatch-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatchlogs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatchlogs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-documentdb-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-documentdb-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-dynamodb-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-dynamodb-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ec2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ec2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecr-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecr-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-efs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-efs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eks-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eks-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elasticache-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elasticache-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elbv2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elbv2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-emrcontainers-controller not reset 
as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-emrcontainers-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eventbridge-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eventbridge-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-iam-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-iam-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kafka-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kafka-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-keyspaces-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-keyspaces-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kinesis-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kinesis-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kms-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kms-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-lambda-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-lambda-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-memorydb-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-memorydb-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-mq-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-mq-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-networkfirewall-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-networkfirewall-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-opensearchservice-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-opensearchservice-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-organizations-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-organizations-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-pipes-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-pipes-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-prometheusservice-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-prometheusservice-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-rds-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-rds-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-recyclebin-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-recyclebin-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53resolver-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53resolver-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-s3-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-s3-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sagemaker-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sagemaker-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-secretsmanager-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-secretsmanager-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ses-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ses-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sfn-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sfn-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sns-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sns-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sqs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sqs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ssm-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ssm-controller/catalog.json not 
reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-wafv2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-wafv2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airflow-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airflow-helm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloydb-omni-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloydb-omni-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alvearie-imaging-ingestion not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alvearie-imaging-ingestion/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amd-gpu-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amd-gpu-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/analytics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/analytics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/annotationlab not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/annotationlab/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-api-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-api-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurito not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurito/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apimatic-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apimatic-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/application-services-metering-operator not 
reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/application-services-metering-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/argocd-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/argocd-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/assisted-service-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/assisted-service-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/automotive-infra not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/automotive-infra/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-efs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-efs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/awss3-operator-registry not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/awss3-operator-registry/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/azure-service-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/azure-service-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/beegfs-csi-driver-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/beegfs-csi-driver-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-k not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-k/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-karavan-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-karavan-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator-community not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator-community/catalog.json not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-utils-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-utils-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-aas-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-aas-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-impairment-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-impairment-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/codeflare-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/codeflare-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-kubevirt-hyperconverged not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-kubevirt-hyperconverged/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-trivy-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-trivy-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-windows-machine-config-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-windows-machine-config-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/customized-user-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/customized-user-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cxl-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cxl-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dapr-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dapr-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datatrucker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datatrucker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dbaas-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dbaas-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/debezium-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/debezium-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/deployment-validation-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/deployment-validation-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devopsinabox not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devopsinabox/catalog.json not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dns-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dns-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-amlen-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-amlen-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-che not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-che/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ecr-secret-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ecr-secret-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edp-keycloak-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edp-keycloak-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/egressip-ipam-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/egressip-ipam-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ember-csi-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ember-csi-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/etcd not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/etcd/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eventing-kogito not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eventing-kogito/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-secrets-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-secrets-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flink-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flink-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8gb not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8gb/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fossul-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fossul-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/github-arc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/github-arc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitops-primer not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitops-primer/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitwebhook-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitwebhook-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/global-load-balancer-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/global-load-balancer-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/grafana-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/grafana-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/group-sync-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/group-sync-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hawtio-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hawtio-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hedvig-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hedvig-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hive-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hive-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/horreum-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/horreum-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hyperfoil-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hyperfoil-bundle/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator-community not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator-community/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-spectrum-scale-csi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-spectrum-scale-csi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibmcloud-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibmcloud-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infinispan not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infinispan/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/integrity-shield-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/integrity-shield-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ipfs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ipfs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/istio-workspace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/istio-workspace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kaoto-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kaoto-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keda not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keda/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keepalived-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keepalived-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-permissions-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-permissions-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/klusterlet not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/klusterlet/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kogito-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kogito-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/koku-metrics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/koku-metrics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/konveyor-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/konveyor-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/korrel8r not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/korrel8r/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kuadrant-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kuadrant-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kube-green not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kube-green/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubernetes-imagepuller-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubernetes-imagepuller-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/l5-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/l5-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/layer7-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/layer7-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lbconfig-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lbconfig-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lib-bucket-provisioner not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lib-bucket-provisioner/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/limitador-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/limitador-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logging-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logging-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-helm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mariadb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mariadb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marin3r not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marin3r/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mercury-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mercury-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/microcks not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/microcks/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/move2kube-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/move2kube-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multi-nic-cni-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multi-nic-cni-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-global-hub-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-global-hub-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-operators-subscription not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-operators-subscription/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/must-gather-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/must-gather-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/namespace-configuration-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/namespace-configuration-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ncn-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ncn-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ndmspc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ndmspc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator-m88i not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator-m88i/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nfs-provisioner-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nfs-provisioner-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nlp-server not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nlp-server/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-discovery-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-discovery-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nsm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nsm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oadp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oadp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/observability-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/observability-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oci-ccm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oci-ccm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odoo-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odoo-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opendatahub-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opendatahub-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openebs not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openebs/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-nfd-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-nfd-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-node-upgrade-mutex-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-node-upgrade-mutex-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-qiskit-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-qiskit-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patch-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patch-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patterns-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patterns-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pelorus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pelorus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/percona-xtradb-cluster-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/percona-xtradb-cluster-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-essentials not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-essentials/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/postgresql not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/postgresql/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/proactive-node-scaling-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/proactive-node-scaling-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/project-quay not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/project-quay/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus-exporter-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus-exporter-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pulp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pulp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-cluster-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-cluster-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-messaging-topology-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-messaging-topology-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/reportportal-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/reportportal-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/resource-locker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/resource-locker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhoas-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhoas-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ripsaw not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ripsaw/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sailoperator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sailoperator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-commerce-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-commerce-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-data-intelligence-observer-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-data-intelligence-observer-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-hana-express-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-hana-express-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-binding-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-binding-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/shipwright-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/shipwright-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sigstore-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sigstore-helm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snapscheduler not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snapscheduler/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snyk-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snyk-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/socmmd not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/socmmd/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonar-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonar-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosivio not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosivio/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonataflow-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonataflow-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosreport-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosreport-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/spark-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/spark-helm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/special-resource-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/special-resource-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron-engine not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron-engine/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/strimzi-kafka-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/strimzi-kafka-operator/catalog.json not reset as
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/syndesis not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/syndesis/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tagger not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tagger/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tf-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tf-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tidb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tidb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trident-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trident-operator/catalog.json not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustify-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustify-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ucs-ci-solutions-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ucs-ci-solutions-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/universal-crossplane not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/universal-crossplane/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/varnish-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/varnish-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-config-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-config-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/verticadb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/verticadb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volume-expander-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volume-expander-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/wandb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/wandb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/windup-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/windup-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yaks not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yaks/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-utilities/c0fe7256 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-utilities/c30319e4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-utilities/e6b1dd45 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-content/2bb643f0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-content/920de426 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-content/70fa1e87 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/registry-server/a1c12a2f not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/registry-server/9442e6c7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/registry-server/5b45ec72 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abot-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abot-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/entando-k8s-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/entando-k8s-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-paygo-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-paygo-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-term-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-term-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/linstor-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/linstor-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-deploy-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-deploy-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-paygo-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-paygo-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vfunction-server-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vfunction-server-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yugabyte-platform-operator-bundle-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yugabyte-platform-operator-bundle-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-utilities/3c9f3a59 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-utilities/1091c11b not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-utilities/9a6821c6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-content/ec0c35e2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-content/517f37e7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-content/6214fe78 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/registry-server/ba189c8b not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/registry-server/351e4f31 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/registry-server/c0f219ff not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/wait-for-host-port/8069f607 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/wait-for-host-port/559c3d82 not reset as customized by admin to system_u:object_r:container_file_t:s0:c133,c223 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/wait-for-host-port/605ad488 not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler/148df488 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler/3bf6dcb4 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c133,c223 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler/022a2feb not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-cert-syncer/938c3924 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-cert-syncer/729fe23e not reset as customized by admin to system_u:object_r:container_file_t:s0:c133,c223 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-cert-syncer/1fd5cbd4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-recovery-controller/a96697e1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-recovery-controller/e155ddca not reset as customized by admin to system_u:object_r:container_file_t:s0:c133,c223 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-recovery-controller/10dd0e0f not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/..2025_02_24_06_09_35.3018472960 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/..2025_02_24_06_09_35.3018472960/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/..2025_02_24_06_09_35.4262376737 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 22 10:39:39 crc restorecon[4689]: 
/var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/..2025_02_24_06_09_35.4262376737/audit.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/audit.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/..2025_02_24_06_09_35.2630275752 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/..2025_02_24_06_09_35.2630275752/v4-0-config-system-cliconfig not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/v4-0-config-system-cliconfig not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/..2025_02_24_06_09_35.2376963788 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/..2025_02_24_06_09_35.2376963788/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/etc-hosts not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c682,c947 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/containers/oauth-openshift/6f2c8392 not reset as customized by admin to system_u:object_r:container_file_t:s0:c267,c588 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/containers/oauth-openshift/bd241ad9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/plugins not reset as customized by admin to system_u:object_r:container_file_t:s0 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/plugins/csi-hostpath not reset as customized by admin to system_u:object_r:container_file_t:s0 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/plugins/csi-hostpath/csi.sock not reset as customized by admin to system_u:object_r:container_file_t:s0 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/plugins/kubernetes.io not reset as customized by admin to system_u:object_r:container_file_t:s0 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/plugins/kubernetes.io/csi not reset as customized by admin to system_u:object_r:container_file_t:s0 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner not reset as customized by admin to system_u:object_r:container_file_t:s0 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983 not reset as customized by admin to system_u:object_r:container_file_t:s0 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/globalmount not reset as customized by admin to system_u:object_r:container_file_t:s0 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/vol_data.json not reset as customized by admin to system_u:object_r:container_file_t:s0 Nov 22 10:39:39 crc restorecon[4689]: /var/lib/kubelet/plugins_registry not reset as customized by admin to system_u:object_r:container_file_t:s0 Nov 22 10:39:39 crc restorecon[4689]: Relabeled /var/usrlocal/bin/kubenswrapper from system_u:object_r:bin_t:s0 to system_u:object_r:kubelet_exec_t:s0 Nov 22 10:39:40 crc kubenswrapper[4926]: Flag --container-runtime-endpoint has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information. Nov 22 10:39:40 crc kubenswrapper[4926]: Flag --minimum-container-ttl-duration has been deprecated, Use --eviction-hard or --eviction-soft instead. Will be removed in a future version. Nov 22 10:39:40 crc kubenswrapper[4926]: Flag --volume-plugin-dir has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information. Nov 22 10:39:40 crc kubenswrapper[4926]: Flag --register-with-taints has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information. 
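The long restorecon run above records, one entry per file, paths whose SELinux labels were left in place because an admin customization (container_file_t with per-pod MCS categories such as s0:c7,c13) already applied. For offline triage of a capture like this one, a minimal sketch of a tally by context is shown below; it assumes only the "restorecon[PID]: PATH not reset as customized by admin to CONTEXT" shape visible in the entries above, and the kubelet.log filename is a placeholder, not part of this log.

    import re
    from collections import Counter

    # Matches the "not reset" records restorecon emits in this capture, e.g.
    # "restorecon[4689]: /var/lib/kubelet/.../etc-hosts not reset as customized
    #  by admin to system_u:object_r:container_file_t:s0:c7,c13"
    RECORD = re.compile(
        r"restorecon\[\d+\]:\s+(?P<path>\S+) not reset as customized by admin to "
        r"(?P<context>\S+)"
    )

    def tally(log_text: str) -> Counter:
        """Count 'not reset' records per SELinux context."""
        return Counter(m.group("context") for m in RECORD.finditer(log_text))

    if __name__ == "__main__":
        with open("kubelet.log") as f:  # placeholder path, not from the log
            counts = tally(f.read())
        for context, n in counts.most_common():
            print(f"{n:6d}  {context}")

Because the pattern is applied with finditer over the whole text, it works even where several entries run together on one physical line, as they do throughout this capture.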
Nov 22 10:39:40 crc kubenswrapper[4926]: Flag --pod-infra-container-image has been deprecated, will be removed in a future release. Image garbage collector will get sandbox image information from CRI. Nov 22 10:39:40 crc kubenswrapper[4926]: Flag --system-reserved has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information. Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.289930 4926 server.go:211] "--pod-infra-container-image will not be pruned by the image garbage collector in kubelet and should also be set in the remote runtime" Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.300424 4926 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.300460 4926 feature_gate.go:330] unrecognized feature gate: UpgradeStatus Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.300467 4926 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.300474 4926 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.300481 4926 feature_gate.go:330] unrecognized feature gate: Example Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.300487 4926 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.300493 4926 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.300498 4926 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.300504 4926 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.300510 4926 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.300516 4926 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.300523 4926 feature_gate.go:330] unrecognized feature gate: ExternalOIDC Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.300529 4926 feature_gate.go:330] unrecognized feature gate: InsightsConfig Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.300535 4926 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.300542 4926 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.300547 4926 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.300553 4926 feature_gate.go:330] unrecognized feature gate: PinnedImages Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.300558 4926 feature_gate.go:330] unrecognized feature gate: OVNObservability Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.300565 4926 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release. 
Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.300572 4926 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager
Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.300578 4926 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes
Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.300584 4926 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration
Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.300612 4926 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization
Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.300618 4926 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration
Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.300625 4926 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release.
Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.300633 4926 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup
Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.300639 4926 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI
Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.300644 4926 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy
Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.300650 4926 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities
Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.300655 4926 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification
Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.300660 4926 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor
Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.300666 4926 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform
Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.300671 4926 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB
Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.300676 4926 feature_gate.go:330] unrecognized feature gate: PlatformOperators
Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.300682 4926 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release.
Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.300689 4926 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets
Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.300694 4926 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS
Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.300699 4926 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion
Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.300704 4926 feature_gate.go:330] unrecognized feature gate: SignatureStores
Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.300725 4926 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release.
Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.300733 4926 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation
Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.300738 4926 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration
Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.300744 4926 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS
Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.300749 4926 feature_gate.go:330] unrecognized feature gate: ManagedBootImages
Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.300755 4926 feature_gate.go:330] unrecognized feature gate: DNSNameResolver
Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.300761 4926 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack
Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.300767 4926 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy
Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.300774 4926 feature_gate.go:330] unrecognized feature gate: HardwareSpeed
Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.300780 4926 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags
Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.300793 4926 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall
Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.300800 4926 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements
Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.300805 4926 feature_gate.go:330] unrecognized feature gate: GatewayAPI
Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.300810 4926 feature_gate.go:330] unrecognized feature gate: NewOLM
Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.300816 4926 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig
Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.300821 4926 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity
Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.300826 4926 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy
Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.300831 4926 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS
Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.300836 4926 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource
Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.300841 4926 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig
Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.300846 4926 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics
Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.300851 4926 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks
Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.300856 4926 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters
Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.300861 4926 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes
Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.300866 4926 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud
Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.300871 4926 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles
Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.300876 4926 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota
Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.300881 4926 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission
Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.300903 4926 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController
Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.300908 4926 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode
Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.300914 4926 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP
Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.300919 4926 feature_gate.go:330] unrecognized feature gate: OnClusterBuild
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.302594 4926 flags.go:64] FLAG: --address="0.0.0.0"
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.302611 4926 flags.go:64] FLAG: --allowed-unsafe-sysctls="[]"
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.302621 4926 flags.go:64] FLAG: --anonymous-auth="true"
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.302629 4926 flags.go:64] FLAG: --application-metrics-count-limit="100"
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.302637 4926 flags.go:64] FLAG: --authentication-token-webhook="false"
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.302644 4926 flags.go:64] FLAG: --authentication-token-webhook-cache-ttl="2m0s"
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.302652 4926 flags.go:64] FLAG: --authorization-mode="AlwaysAllow"
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.302660 4926 flags.go:64] FLAG: --authorization-webhook-cache-authorized-ttl="5m0s"
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.302667 4926 flags.go:64] FLAG: --authorization-webhook-cache-unauthorized-ttl="30s"
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.302673 4926 flags.go:64] FLAG: --boot-id-file="/proc/sys/kernel/random/boot_id"
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.302680 4926 flags.go:64] FLAG: --bootstrap-kubeconfig="/etc/kubernetes/kubeconfig"
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.302689 4926 flags.go:64] FLAG: --cert-dir="/var/lib/kubelet/pki"
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.302695 4926 flags.go:64] FLAG: --cgroup-driver="cgroupfs"
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.302701 4926 flags.go:64] FLAG: --cgroup-root=""
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.302707 4926 flags.go:64] FLAG: --cgroups-per-qos="true"
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.302714 4926 flags.go:64] FLAG: --client-ca-file=""
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.302720 4926 flags.go:64] FLAG: --cloud-config=""
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.302726 4926 flags.go:64] FLAG: --cloud-provider=""
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.302732 4926 flags.go:64] FLAG: --cluster-dns="[]"
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.302740 4926 flags.go:64] FLAG: --cluster-domain=""
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.302746 4926 flags.go:64] FLAG: --config="/etc/kubernetes/kubelet.conf"
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.302752 4926 flags.go:64] FLAG: --config-dir=""
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.302758 4926 flags.go:64] FLAG: --container-hints="/etc/cadvisor/container_hints.json"
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.302764 4926 flags.go:64] FLAG: --container-log-max-files="5"
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.302772 4926 flags.go:64] FLAG: --container-log-max-size="10Mi"
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.302778 4926 flags.go:64] FLAG: --container-runtime-endpoint="/var/run/crio/crio.sock"
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.302785 4926 flags.go:64] FLAG: --containerd="/run/containerd/containerd.sock"
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.302791 4926 flags.go:64] FLAG: --containerd-namespace="k8s.io"
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.302797 4926 flags.go:64] FLAG: --contention-profiling="false"
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.302803 4926 flags.go:64] FLAG: --cpu-cfs-quota="true"
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.302809 4926 flags.go:64] FLAG: --cpu-cfs-quota-period="100ms"
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.302816 4926 flags.go:64] FLAG: --cpu-manager-policy="none"
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.302822 4926 flags.go:64] FLAG: --cpu-manager-policy-options=""
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.302829 4926 flags.go:64] FLAG: --cpu-manager-reconcile-period="10s"
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.302835 4926 flags.go:64] FLAG: --enable-controller-attach-detach="true"
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.302841 4926 flags.go:64] FLAG: --enable-debugging-handlers="true"
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.302848 4926 flags.go:64] FLAG: --enable-load-reader="false"
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.302855 4926 flags.go:64] FLAG: --enable-server="true"
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.302861 4926 flags.go:64] FLAG: --enforce-node-allocatable="[pods]"
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.302868 4926 flags.go:64] FLAG: --event-burst="100"
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.302875 4926 flags.go:64] FLAG: --event-qps="50"
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.302881 4926 flags.go:64] FLAG: --event-storage-age-limit="default=0"
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.302905 4926 flags.go:64] FLAG: --event-storage-event-limit="default=0"
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.302911 4926 flags.go:64] FLAG: --eviction-hard=""
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.302927 4926 flags.go:64] FLAG: --eviction-max-pod-grace-period="0"
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.302933 4926 flags.go:64] FLAG: --eviction-minimum-reclaim=""
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.302939 4926 flags.go:64] FLAG: --eviction-pressure-transition-period="5m0s"
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.302950 4926 flags.go:64] FLAG: --eviction-soft=""
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.302956 4926 flags.go:64] FLAG: --eviction-soft-grace-period=""
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.302962 4926 flags.go:64] FLAG: --exit-on-lock-contention="false"
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.302968 4926 flags.go:64] FLAG: --experimental-allocatable-ignore-eviction="false"
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.302974 4926 flags.go:64] FLAG: --experimental-mounter-path=""
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.302980 4926 flags.go:64] FLAG: --fail-cgroupv1="false"
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.302986 4926 flags.go:64] FLAG: --fail-swap-on="true"
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.302992 4926 flags.go:64] FLAG: --feature-gates=""
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.303000 4926 flags.go:64] FLAG: --file-check-frequency="20s"
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.303006 4926 flags.go:64] FLAG: --global-housekeeping-interval="1m0s"
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.303012 4926 flags.go:64] FLAG: --hairpin-mode="promiscuous-bridge"
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.303019 4926 flags.go:64] FLAG: --healthz-bind-address="127.0.0.1"
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.303025 4926 flags.go:64] FLAG: --healthz-port="10248"
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.303031 4926 flags.go:64] FLAG: --help="false"
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.303037 4926 flags.go:64] FLAG: --hostname-override=""
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.303043 4926 flags.go:64] FLAG: --housekeeping-interval="10s"
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.303049 4926 flags.go:64] FLAG: --http-check-frequency="20s"
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.303056 4926 flags.go:64] FLAG: --image-credential-provider-bin-dir=""
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.303062 4926 flags.go:64] FLAG: --image-credential-provider-config=""
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.303067 4926 flags.go:64] FLAG: --image-gc-high-threshold="85"
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.303074 4926 flags.go:64] FLAG: --image-gc-low-threshold="80"
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.303080 4926 flags.go:64] FLAG: --image-service-endpoint=""
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.303086 4926 flags.go:64] FLAG: --kernel-memcg-notification="false"
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.303092 4926 flags.go:64] FLAG: --kube-api-burst="100"
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.303099 4926 flags.go:64] FLAG: --kube-api-content-type="application/vnd.kubernetes.protobuf"
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.303105 4926 flags.go:64] FLAG: --kube-api-qps="50"
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.303112 4926 flags.go:64] FLAG: --kube-reserved=""
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.303118 4926 flags.go:64] FLAG: --kube-reserved-cgroup=""
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.303124 4926 flags.go:64] FLAG: --kubeconfig="/var/lib/kubelet/kubeconfig"
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.303130 4926 flags.go:64] FLAG: --kubelet-cgroups=""
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.303136 4926 flags.go:64] FLAG: --local-storage-capacity-isolation="true"
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.303142 4926 flags.go:64] FLAG: --lock-file=""
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.303148 4926 flags.go:64] FLAG: --log-cadvisor-usage="false"
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.303155 4926 flags.go:64] FLAG: --log-flush-frequency="5s"
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.303161 4926 flags.go:64] FLAG: --log-json-info-buffer-size="0"
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.303170 4926 flags.go:64] FLAG: --log-json-split-stream="false"
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.303177 4926 flags.go:64] FLAG: --log-text-info-buffer-size="0"
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.303184 4926 flags.go:64] FLAG: --log-text-split-stream="false"
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.303190 4926 flags.go:64] FLAG: --logging-format="text"
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.303196 4926 flags.go:64] FLAG: --machine-id-file="/etc/machine-id,/var/lib/dbus/machine-id"
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.303203 4926 flags.go:64] FLAG: --make-iptables-util-chains="true"
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.303209 4926 flags.go:64] FLAG: --manifest-url=""
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.303215 4926 flags.go:64] FLAG: --manifest-url-header=""
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.303223 4926 flags.go:64] FLAG: --max-housekeeping-interval="15s"
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.303229 4926 flags.go:64] FLAG: --max-open-files="1000000"
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.303237 4926 flags.go:64] FLAG: --max-pods="110"
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.303243 4926 flags.go:64] FLAG: --maximum-dead-containers="-1"
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.303250 4926 flags.go:64] FLAG: --maximum-dead-containers-per-container="1"
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.303256 4926 flags.go:64] FLAG: --memory-manager-policy="None"
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.303262 4926 flags.go:64] FLAG: --minimum-container-ttl-duration="6m0s"
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.303268 4926 flags.go:64] FLAG: --minimum-image-ttl-duration="2m0s"
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.303274 4926 flags.go:64] FLAG: --node-ip="192.168.126.11"
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.303280 4926 flags.go:64] FLAG: --node-labels="node-role.kubernetes.io/control-plane=,node-role.kubernetes.io/master=,node.openshift.io/os_id=rhcos"
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.303294 4926 flags.go:64] FLAG: --node-status-max-images="50"
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.303299 4926 flags.go:64] FLAG: --node-status-update-frequency="10s"
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.303305 4926 flags.go:64] FLAG: --oom-score-adj="-999"
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.303312 4926 flags.go:64] FLAG: --pod-cidr=""
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.303318 4926 flags.go:64] FLAG: --pod-infra-container-image="quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:33549946e22a9ffa738fd94b1345f90921bc8f92fa6137784cb33c77ad806f9d"
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.303328 4926 flags.go:64] FLAG: --pod-manifest-path=""
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.303334 4926 flags.go:64] FLAG: --pod-max-pids="-1"
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.303340 4926 flags.go:64] FLAG: --pods-per-core="0"
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.303346 4926 flags.go:64] FLAG: --port="10250"
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.303352 4926 flags.go:64] FLAG: --protect-kernel-defaults="false"
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.303358 4926 flags.go:64] FLAG: --provider-id=""
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.303364 4926 flags.go:64] FLAG: --qos-reserved=""
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.303370 4926 flags.go:64] FLAG: --read-only-port="10255"
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.303376 4926 flags.go:64] FLAG: --register-node="true"
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.303382 4926 flags.go:64] FLAG: --register-schedulable="true"
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.303388 4926 flags.go:64] FLAG: --register-with-taints="node-role.kubernetes.io/master=:NoSchedule"
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.303398 4926 flags.go:64] FLAG: --registry-burst="10"
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.303403 4926 flags.go:64] FLAG: --registry-qps="5"
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.303409 4926 flags.go:64] FLAG: --reserved-cpus=""
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.303416 4926 flags.go:64] FLAG: --reserved-memory=""
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.303424 4926 flags.go:64] FLAG: --resolv-conf="/etc/resolv.conf"
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.303430 4926 flags.go:64] FLAG: --root-dir="/var/lib/kubelet"
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.303437 4926 flags.go:64] FLAG: --rotate-certificates="false"
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.303443 4926 flags.go:64] FLAG: --rotate-server-certificates="false"
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.303449 4926 flags.go:64] FLAG: --runonce="false"
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.303455 4926 flags.go:64] FLAG: --runtime-cgroups="/system.slice/crio.service"
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.303461 4926 flags.go:64] FLAG: --runtime-request-timeout="2m0s"
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.303467 4926 flags.go:64] FLAG: --seccomp-default="false"
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.303473 4926 flags.go:64] FLAG: --serialize-image-pulls="true"
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.303479 4926 flags.go:64] FLAG: --storage-driver-buffer-duration="1m0s"
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.303485 4926 flags.go:64] FLAG: --storage-driver-db="cadvisor"
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.303491 4926 flags.go:64] FLAG: --storage-driver-host="localhost:8086"
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.303497 4926 flags.go:64] FLAG: --storage-driver-password="root"
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.303503 4926 flags.go:64] FLAG: --storage-driver-secure="false"
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.303509 4926 flags.go:64] FLAG: --storage-driver-table="stats"
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.303516 4926 flags.go:64] FLAG: --storage-driver-user="root"
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.303522 4926 flags.go:64] FLAG: --streaming-connection-idle-timeout="4h0m0s"
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.303528 4926 flags.go:64] FLAG: --sync-frequency="1m0s"
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.303535 4926 flags.go:64] FLAG: --system-cgroups=""
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.303541 4926 flags.go:64] FLAG: --system-reserved="cpu=200m,ephemeral-storage=350Mi,memory=350Mi"
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.303551 4926 flags.go:64] FLAG: --system-reserved-cgroup=""
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.303557 4926 flags.go:64] FLAG: --tls-cert-file=""
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.303564 4926 flags.go:64] FLAG: --tls-cipher-suites="[]"
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.303572 4926 flags.go:64] FLAG: --tls-min-version=""
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.303579 4926 flags.go:64] FLAG: --tls-private-key-file=""
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.303584 4926 flags.go:64] FLAG: --topology-manager-policy="none"
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.303591 4926 flags.go:64] FLAG: --topology-manager-policy-options=""
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.303597 4926 flags.go:64] FLAG: --topology-manager-scope="container"
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.303603 4926 flags.go:64] FLAG: --v="2"
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.303610 4926 flags.go:64] FLAG: --version="false"
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.303618 4926 flags.go:64] FLAG: --vmodule=""
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.303625 4926 flags.go:64] FLAG: --volume-plugin-dir="/etc/kubernetes/kubelet-plugins/volume/exec"
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.303631 4926 flags.go:64] FLAG: --volume-stats-agg-period="1m0s"
Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.303803 4926 feature_gate.go:330] unrecognized feature gate: ManagedBootImages
Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.303811 4926 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release.
Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.303821 4926 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs
Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.303827 4926 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather
Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.303833 4926 feature_gate.go:330] unrecognized feature gate: SignatureStores
Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.303839 4926 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB
Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.303844 4926 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager
Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.303850 4926 feature_gate.go:330] unrecognized feature gate: GatewayAPI
Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.303855 4926 feature_gate.go:330] unrecognized feature gate: Example
Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.303860 4926 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization
Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.303866 4926 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS
Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.303871 4926 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy
Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.303876 4926 feature_gate.go:330] unrecognized feature gate: UpgradeStatus
Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.303881 4926 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig
Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.303905 4926 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS
Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.303910 4926 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor
Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.303915 4926 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI
Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.303920 4926 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles
Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.303926 4926 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform
Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.303931 4926 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure
Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.303936 4926 feature_gate.go:330] unrecognized feature gate: ExternalOIDC
Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.303941 4926 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets
Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.303947 4926 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes
Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.303952 4926 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer
Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.303960 4926 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release.
Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.303966 4926 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS
Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.303973 4926 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release.
Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.303980 4926 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration
Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.303986 4926 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion
Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.303992 4926 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity
Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.303998 4926 feature_gate.go:330] unrecognized feature gate: PinnedImages
Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.304003 4926 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup
Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.304009 4926 feature_gate.go:330] unrecognized feature gate: DNSNameResolver
Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.304014 4926 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration
Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.304019 4926 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource
Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.304025 4926 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode
Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.304030 4926 feature_gate.go:330] unrecognized feature gate: OVNObservability
Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.304036 4926 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota
Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.304042 4926 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController
Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.304048 4926 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud
Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.304054 4926 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement
Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.304059 4926 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes
Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.304064 4926 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS
Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.304069 4926 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements
Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.304074 4926 feature_gate.go:330] unrecognized feature gate: OnClusterBuild
Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.304086 4926 feature_gate.go:330] unrecognized feature gate: InsightsConfig
Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.304091 4926 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities
Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.304097 4926 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS
Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.304102 4926 feature_gate.go:330] unrecognized feature gate: NewOLM
Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.304107 4926 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation
Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.304112 4926 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack
Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.304118 4926 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy
Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.304123 4926 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall
Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.304129 4926 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission
Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.304134 4926 feature_gate.go:330] unrecognized feature gate: HardwareSpeed
Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.304144 4926 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy
Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.304149 4926 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters
Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.304154 4926 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks
Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.304160 4926 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig
Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.304165 4926 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation
Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.304170 4926 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet
Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.304175 4926 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags
Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.304180 4926 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS
Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.304186 4926 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification
Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.304191 4926 feature_gate.go:330] unrecognized feature gate: PlatformOperators
Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.304196 4926 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP
Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.304201 4926 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController
Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.304206 4926 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics
Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.304211 4926 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot
Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.304217 4926 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration
Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.304224 4926 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release.
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.304239 4926 feature_gate.go:386] feature gates: {map[CloudDualStackNodeIPs:true DisableKubeletCloudCredentialProviders:true DynamicResourceAllocation:false EventedPLEG:false KMSv1:true MaxUnavailableStatefulSet:false NodeSwap:false ProcMountType:false RouteExternalCertificate:false ServiceAccountTokenNodeBinding:false TranslateStreamCloseWebsocketRequests:false UserNamespacesPodSecurityStandards:false UserNamespacesSupport:false ValidatingAdmissionPolicy:true VolumeAttributesClass:false]}
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.314334 4926 server.go:491] "Kubelet version" kubeletVersion="v1.31.5"
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.314358 4926 server.go:493] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK=""
Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.314440 4926 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities
Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.314449 4926 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion
Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.314458 4926 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController
Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.314465 4926 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters
Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.314473 4926 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot
Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.314481 4926 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release.
Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.314492 4926 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements
Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.314499 4926 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity
Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.314505 4926 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles
Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.314510 4926 feature_gate.go:330] unrecognized feature gate: ExternalOIDC
Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.314516 4926 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets
Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.314521 4926 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS
Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.314526 4926 feature_gate.go:330] unrecognized feature gate: GatewayAPI
Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.314531 4926 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy
Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.314536 4926 feature_gate.go:330] unrecognized feature gate: NewOLM
Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.314541 4926 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes
Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.314547 4926 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs
Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.314552 4926 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack
Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.314557 4926 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager
Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.314562 4926 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration
Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.314568 4926 feature_gate.go:330] unrecognized feature gate: PlatformOperators
Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.314574 4926 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource
Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.314581 4926 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics
Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.314587 4926 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB
Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.314593 4926 feature_gate.go:330] unrecognized feature gate: SignatureStores
Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.314600 4926 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController
Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.314606 4926 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS
Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.314611 4926 feature_gate.go:330] unrecognized feature gate: Example
Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.314617 4926 feature_gate.go:330] unrecognized feature gate: OVNObservability
Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.314622 4926 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration
Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.314628 4926 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS
Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.314633 4926 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy
Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.314638 4926 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy
Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.314643 4926 feature_gate.go:330] unrecognized feature gate: HardwareSpeed
Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.314650 4926 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission
Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.314655 4926 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup
Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.314660 4926 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags
Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.314666 4926 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration
Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.314670 4926 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet
Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.314677 4926 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release.
Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.314684 4926 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform
Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.314690 4926 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation
Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.314696 4926 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota
Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.314701 4926 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS
Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.314707 4926 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather
Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.314712 4926 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS
Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.314718 4926 feature_gate.go:330] unrecognized feature gate: OnClusterBuild
Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.314723 4926 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer
Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.314729 4926 feature_gate.go:330] unrecognized feature gate: UpgradeStatus
Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.314734 4926 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization
Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.314738 4926 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI
Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.314745 4926 feature_gate.go:330] unrecognized feature gate: DNSNameResolver
Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.314750 4926 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud
Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.314755 4926 feature_gate.go:330] unrecognized feature gate: ManagedBootImages
Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.314761 4926 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig
Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.314766 4926 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure
Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.314772 4926 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement
Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.314777 4926 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes
Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.314784 4926 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release.
Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.314791 4926 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode
Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.314798 4926 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation
Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.314804 4926 feature_gate.go:330] unrecognized feature gate: InsightsConfig
Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.314809 4926 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor
Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.314814 4926 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall
Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.314820 4926 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks
Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.314825 4926 feature_gate.go:330] unrecognized feature gate: PinnedImages
Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.314830 4926 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification
Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.314835 4926 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS
Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.314842 4926 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release.
Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.314848 4926 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig
Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.314859 4926 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.314868 4926 feature_gate.go:386] feature gates: {map[CloudDualStackNodeIPs:true DisableKubeletCloudCredentialProviders:true DynamicResourceAllocation:false EventedPLEG:false KMSv1:true MaxUnavailableStatefulSet:false NodeSwap:false ProcMountType:false RouteExternalCertificate:false ServiceAccountTokenNodeBinding:false TranslateStreamCloseWebsocketRequests:false UserNamespacesPodSecurityStandards:false UserNamespacesSupport:false ValidatingAdmissionPolicy:true VolumeAttributesClass:false]}
Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.315040 4926 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy
Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.315051 4926 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer
Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.315058 4926 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags
Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.315065 4926 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall
Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.315071 4926 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB
Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.315077 4926 feature_gate.go:330] unrecognized feature gate: InsightsConfig
Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.315082 4926 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks
Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.315087 4926 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration
Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.315092 4926 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource
Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.315098 4926 feature_gate.go:330] unrecognized feature gate: NewOLM
Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.315106 4926 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release.
Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.315113 4926 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController
Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.315119 4926 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration
Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.315125 4926 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform
Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.315130 4926 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS
Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.315136 4926 feature_gate.go:330] unrecognized feature gate: Example
Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.315142 4926 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities
Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.315148 4926 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure
Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.315154 4926 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud
Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.315159 4926 feature_gate.go:330] unrecognized feature gate: GatewayAPI
Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.315165 4926 feature_gate.go:330] unrecognized feature gate: PlatformOperators
Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.315170 4926 feature_gate.go:330] unrecognized feature gate: DNSNameResolver
Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.315175 4926 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters
Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.315181 4926 feature_gate.go:330] unrecognized feature gate: ExternalOIDC
Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.315186 4926 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode
Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.315191 4926 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration
Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.315196 4926 feature_gate.go:330] unrecognized feature gate: SignatureStores
Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.315201 4926 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion
Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.315206 4926 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP
Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.315211 4926 feature_gate.go:330] unrecognized feature gate: OnClusterBuild
Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.315217 4926 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot
Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.315222 4926 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement
Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.315227 4926 feature_gate.go:330] unrecognized feature gate: HardwareSpeed
Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.315232 4926 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles
Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.315238 4926 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig
Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.315243 4926 feature_gate.go:330] unrecognized feature gate: UpgradeStatus
Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.315248 4926 feature_gate.go:330] unrecognized feature gate: OVNObservability
Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.315254 4926 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor
Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.315260 4926 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics
Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.315265 4926 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI
Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.315270 4926 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes
Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.315276 4926 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization
Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.315281 4926 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack
Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.315286 4926 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification
Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.315291 4926 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS
Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.315297 4926 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather
Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.315304 4926 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release.
Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.315310 4926 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs
Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.315316 4926 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements
Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.315323 4926 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig
Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.315329 4926 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy
Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.315335 4926 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS
Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.315341 4926 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS
Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.315347 4926 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet
Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.315353 4926 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS
Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.315358 4926 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy
Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.315364 4926 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota
Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.315371 4926 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release.
Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.315377 4926 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager
Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.315382 4926 feature_gate.go:330] unrecognized feature gate: PinnedImages
Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.315388 4926 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController
Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.315393 4926 feature_gate.go:330] unrecognized feature gate: ManagedBootImages
Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.315398 4926 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS
Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.315403 4926 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission
Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.315408 4926 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation
Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.315413 4926 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation
Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.315418 4926 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets
Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.315423 4926 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes
Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.315429 4926 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup
Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.315435 4926 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release.
Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.315443 4926 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.315451 4926 feature_gate.go:386] feature gates: {map[CloudDualStackNodeIPs:true DisableKubeletCloudCredentialProviders:true DynamicResourceAllocation:false EventedPLEG:false KMSv1:true MaxUnavailableStatefulSet:false NodeSwap:false ProcMountType:false RouteExternalCertificate:false ServiceAccountTokenNodeBinding:false TranslateStreamCloseWebsocketRequests:false UserNamespacesPodSecurityStandards:false UserNamespacesSupport:false ValidatingAdmissionPolicy:true VolumeAttributesClass:false]}
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.315622 4926 server.go:940] "Client rotation is on, will bootstrap in background"
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.321027 4926 bootstrap.go:85] "Current kubeconfig file contents are still valid, no bootstrap necessary"
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.321360 4926 certificate_store.go:130] Loading cert/key pair from "/var/lib/kubelet/pki/kubelet-client-current.pem".
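Each of the gate-parsing passes above ends with the same resolved map at feature_gate.go:386: the handful of names the kubelet's own gate registry recognizes are pinned (four true, the rest of the listed ones false), while the dozens of "unrecognized feature gate" warnings are OpenShift-level gates that are simply ignored at this layer. If one wanted to pin the recognized gates in the kubelet config file itself, the v1beta1 schema's featureGates map would be the place; a sketch mirroring the resolved map, not a quote of this cluster's actual kubelet.conf:

  apiVersion: kubelet.config.k8s.io/v1beta1
  kind: KubeletConfiguration
  # mirrors the true-valued entries in the feature_gate.go:386 line above;
  # only gates the kubelet itself recognizes belong here
  featureGates:
    CloudDualStackNodeIPs: true
    DisableKubeletCloudCredentialProviders: true
    KMSv1: true
    ValidatingAdmissionPolicy: true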
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.323836 4926 server.go:997] "Starting client certificate rotation"
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.323864 4926 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Certificate rotation is enabled
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.326072 4926 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Certificate expiration is 2026-02-24 05:52:08 +0000 UTC, rotation deadline is 2025-12-31 19:10:39.291777803 +0000 UTC
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.326168 4926 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Waiting 944h30m58.965614409s for next certificate rotation
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.357845 4926 dynamic_cafile_content.go:123] "Loaded a new CA Bundle and Verifier" name="client-ca-bundle::/etc/kubernetes/kubelet-ca.crt"
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.360424 4926 dynamic_cafile_content.go:161] "Starting controller" name="client-ca-bundle::/etc/kubernetes/kubelet-ca.crt"
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.387314 4926 log.go:25] "Validated CRI v1 runtime API"
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.421410 4926 log.go:25] "Validated CRI v1 image API"
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.425619 4926 server.go:1437] "Using cgroup driver setting received from the CRI runtime" cgroupDriver="systemd"
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.433848 4926 fs.go:133] Filesystem UUIDs: map[0b076daa-c26a-46d2-b3a6-72a8dbc6e257:/dev/vda4 2025-11-22-10-34-59-00:/dev/sr0 7B77-95E7:/dev/vda2 de0497b0-db1b-465a-b278-03db02455c71:/dev/vda3]
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.433963 4926 fs.go:134] Filesystem partitions: map[/dev/shm:{mountpoint:/dev/shm major:0 minor:22 fsType:tmpfs blockSize:0} /dev/vda3:{mountpoint:/boot major:252 minor:3 fsType:ext4 blockSize:0} /dev/vda4:{mountpoint:/var major:252 minor:4 fsType:xfs blockSize:0} /run:{mountpoint:/run major:0 minor:24 fsType:tmpfs blockSize:0} /run/user/1000:{mountpoint:/run/user/1000 major:0 minor:42 fsType:tmpfs blockSize:0} /tmp:{mountpoint:/tmp major:0 minor:30 fsType:tmpfs blockSize:0} /var/lib/etcd:{mountpoint:/var/lib/etcd major:0 minor:43 fsType:tmpfs blockSize:0}]
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.467334 4926 manager.go:217] Machine: {Timestamp:2025-11-22 10:39:40.463386238 +0000 UTC m=+0.764991595 CPUVendorID:AuthenticAMD NumCores:12 NumPhysicalCores:1 NumSockets:12 CpuFrequency:2800000 MemoryCapacity:33654116352 SwapCapacity:0 MemoryByType:map[] NVMInfo:{MemoryModeCapacity:0 AppDirectModeCapacity:0 AvgPowerBudget:0} HugePages:[{PageSize:1048576 NumPages:0} {PageSize:2048 NumPages:0}] MachineID:21801e6708c44f15b81395eb736a7cec SystemUUID:16dcb71b-1e1f-4d77-bb74-17aa213c9052 BootID:aeb560ef-1eb5-4732-98e3-250b723cbd1b Filesystems:[{Device:/dev/shm DeviceMajor:0 DeviceMinor:22 Capacity:16827056128 Type:vfs Inodes:4108168 HasInodes:true} {Device:/run DeviceMajor:0 DeviceMinor:24 Capacity:6730825728 Type:vfs Inodes:819200 HasInodes:true} {Device:/dev/vda4 DeviceMajor:252 DeviceMinor:4 Capacity:85292941312 Type:vfs Inodes:41679680 HasInodes:true} {Device:/tmp DeviceMajor:0 DeviceMinor:30 Capacity:16827060224 Type:vfs Inodes:1048576 HasInodes:true} {Device:/dev/vda3 DeviceMajor:252 DeviceMinor:3 Capacity:366869504 Type:vfs Inodes:98304 HasInodes:true} {Device:/run/user/1000 DeviceMajor:0 DeviceMinor:42 Capacity:3365408768 Type:vfs Inodes:821633 HasInodes:true} {Device:/var/lib/etcd DeviceMajor:0 DeviceMinor:43 Capacity:1073741824 Type:vfs Inodes:4108168 HasInodes:true}] DiskMap:map[252:0:{Name:vda Major:252 Minor:0 Size:214748364800 Scheduler:none}] NetworkDevices:[{Name:br-ex MacAddress:fa:16:3e:c8:0b:4e Speed:0 Mtu:1500} {Name:br-int MacAddress:d6:39:55:2e:22:71 Speed:0 Mtu:1400} {Name:ens3 MacAddress:fa:16:3e:c8:0b:4e Speed:-1 Mtu:1500} {Name:ens7 MacAddress:fa:16:3e:2a:18:2f Speed:-1 Mtu:1500} {Name:ens7.20 MacAddress:52:54:00:a7:25:78 Speed:-1 Mtu:1496} {Name:ens7.21 MacAddress:52:54:00:34:3f:45 Speed:-1 Mtu:1496} {Name:ens7.22 MacAddress:52:54:00:12:7d:0d Speed:-1 Mtu:1496} {Name:eth10 MacAddress:c2:84:d2:b8:d8:a4 Speed:0 Mtu:1500} {Name:ovn-k8s-mp0 MacAddress:0a:58:0a:d9:00:02 Speed:0 Mtu:1400} {Name:ovs-system MacAddress:ba:35:91:c9:20:06 Speed:0 Mtu:1500}] Topology:[{Id:0 Memory:33654116352 HugePages:[{PageSize:1048576 NumPages:0} {PageSize:2048 NumPages:0}] Cores:[{Id:0 Threads:[0] Caches:[{Id:0 Size:32768 Type:Data Level:1} {Id:0 Size:32768 Type:Instruction Level:1} {Id:0 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:0 Size:16777216 Type:Unified Level:3}] SocketID:0 BookID: DrawerID:} {Id:0 Threads:[1] Caches:[{Id:1 Size:32768 Type:Data Level:1} {Id:1 Size:32768 Type:Instruction Level:1} {Id:1 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:1 Size:16777216 Type:Unified Level:3}] SocketID:1 BookID: DrawerID:} {Id:0 Threads:[10] Caches:[{Id:10 Size:32768 Type:Data Level:1} {Id:10 Size:32768 Type:Instruction Level:1} {Id:10 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:10 Size:16777216 Type:Unified Level:3}] SocketID:10 BookID: DrawerID:} {Id:0 Threads:[11] Caches:[{Id:11 Size:32768 Type:Data Level:1} {Id:11 Size:32768 Type:Instruction Level:1} {Id:11 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:11 Size:16777216 Type:Unified Level:3}] SocketID:11 BookID: DrawerID:} {Id:0 Threads:[2] Caches:[{Id:2 Size:32768 Type:Data Level:1} {Id:2 Size:32768 Type:Instruction Level:1} {Id:2 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:2 Size:16777216 Type:Unified Level:3}] SocketID:2 BookID: DrawerID:} {Id:0 Threads:[3] Caches:[{Id:3 Size:32768 Type:Data Level:1} {Id:3 Size:32768 Type:Instruction Level:1} {Id:3 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:3 Size:16777216 Type:Unified Level:3}] SocketID:3 BookID: DrawerID:} {Id:0 Threads:[4] Caches:[{Id:4 Size:32768 Type:Data Level:1} {Id:4 Size:32768 Type:Instruction Level:1} {Id:4 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:4 Size:16777216 Type:Unified Level:3}] SocketID:4 BookID: DrawerID:} {Id:0 Threads:[5] Caches:[{Id:5 Size:32768 Type:Data Level:1} {Id:5 Size:32768 Type:Instruction Level:1} {Id:5 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:5 Size:16777216 Type:Unified Level:3}] SocketID:5 BookID: DrawerID:} {Id:0 Threads:[6] Caches:[{Id:6 Size:32768 Type:Data Level:1} {Id:6 Size:32768 Type:Instruction Level:1} {Id:6 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:6 Size:16777216 Type:Unified Level:3}] SocketID:6 BookID: DrawerID:} {Id:0 Threads:[7] Caches:[{Id:7 Size:32768 Type:Data Level:1} {Id:7 Size:32768 Type:Instruction Level:1} {Id:7 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:7 Size:16777216 Type:Unified Level:3}] SocketID:7 BookID: DrawerID:} {Id:0 Threads:[8] Caches:[{Id:8 Size:32768 Type:Data Level:1} {Id:8 Size:32768 Type:Instruction Level:1} {Id:8 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:8 Size:16777216 Type:Unified Level:3}] SocketID:8 BookID: DrawerID:} {Id:0 Threads:[9] Caches:[{Id:9 Size:32768 Type:Data Level:1} {Id:9 Size:32768 Type:Instruction Level:1} {Id:9 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:9 Size:16777216 Type:Unified Level:3}] SocketID:9 BookID: DrawerID:}] Caches:[] Distances:[10]}] CloudProvider:Unknown InstanceType:Unknown InstanceID:None}
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.468118 4926 manager_no_libpfm.go:29] cAdvisor is build without cgo and/or libpfm support. Perf event counters are not available.
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.468434 4926 manager.go:233] Version: {KernelVersion:5.14.0-427.50.2.el9_4.x86_64 ContainerOsVersion:Red Hat Enterprise Linux CoreOS 418.94.202502100215-0 DockerVersion: DockerAPIVersion: CadvisorVersion: CadvisorRevision:}
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.471235 4926 swap_util.go:113] "Swap is on" /proc/swaps contents="Filename\t\t\t\tType\t\tSize\t\tUsed\t\tPriority"
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.471691 4926 container_manager_linux.go:267] "Container manager verified user specified cgroup-root exists" cgroupRoot=[]
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.471845 4926 container_manager_linux.go:272] "Creating Container Manager object based on Node Config" nodeConfig={"NodeName":"crc","RuntimeCgroupsName":"/system.slice/crio.service","SystemCgroupsName":"/system.slice","KubeletCgroupsName":"","KubeletOOMScoreAdj":-999,"ContainerRuntime":"","CgroupsPerQOS":true,"CgroupRoot":"/","CgroupDriver":"systemd","KubeletRootDir":"/var/lib/kubelet","ProtectKernelDefaults":true,"KubeReservedCgroupName":"","SystemReservedCgroupName":"","ReservedSystemCPUs":{},"EnforceNodeAllocatable":{"pods":{}},"KubeReserved":null,"SystemReserved":{"cpu":"200m","ephemeral-storage":"350Mi","memory":"350Mi"},"HardEvictionThresholds":[{"Signal":"memory.available","Operator":"LessThan","Value":{"Quantity":"100Mi","Percentage":0},"GracePeriod":0,"MinReclaim":null},{"Signal":"nodefs.available","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.1},"GracePeriod":0,"MinReclaim":null},{"Signal":"nodefs.inodesFree","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.05},"GracePeriod":0,"MinReclaim":null},{"Signal":"imagefs.available","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.15},"GracePeriod":0,"MinReclaim":null},{"Signal":"imagefs.inodesFree","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.05},"GracePeriod":0,"MinReclaim":null}],"QOSReserved":{},"CPUManagerPolicy":"none","CPUManagerPolicyOptions":null,"TopologyManagerScope":"container","CPUManagerReconcilePeriod":10000000000,"ExperimentalMemoryManagerPolicy":"None","ExperimentalMemoryManagerReservedMemory":null,"PodPidsLimit":4096,"EnforceCPULimits":true,"CPUCFSQuotaPeriod":100000000,"TopologyManagerPolicy":"none","TopologyManagerPolicyOptions":null,"CgroupVersion":2}
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.472371 4926 topology_manager.go:138] "Creating topology manager with none policy"
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.472484 4926 container_manager_linux.go:303] "Creating device plugin manager"
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.473475 4926 manager.go:142] "Creating Device Plugin manager" path="/var/lib/kubelet/device-plugins/kubelet.sock"
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.473617 4926 server.go:66] "Creating device plugin registration server" version="v1beta1" socket="/var/lib/kubelet/device-plugins/kubelet.sock"
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.474056 4926 state_mem.go:36] "Initialized new in-memory state store"
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.474295 4926 server.go:1245] "Using root directory" path="/var/lib/kubelet"
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.484770 4926 kubelet.go:418] "Attempting to sync node with API server"
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.484985 4926 kubelet.go:313] "Adding static pod path" path="/etc/kubernetes/manifests"
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.485140 4926 file.go:69] "Watching path" path="/etc/kubernetes/manifests"
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.485260 4926 kubelet.go:324] "Adding apiserver pod source"
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.485374 4926 apiserver.go:42] "Waiting for node sync before watching apiserver pods"
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.493164 4926 kuberuntime_manager.go:262] "Container runtime initialized" containerRuntime="cri-o" version="1.31.5-4.rhaos4.18.gitdad78d5.el9" apiVersion="v1"
Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.494652 4926 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0": dial tcp 38.102.83.248:6443: connect: connection refused
Nov 22 10:39:40 crc kubenswrapper[4926]: E1122 10:39:40.494946 4926 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Service: failed to list *v1.Service: Get \"https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0\": dial tcp 38.102.83.248:6443: connect: connection refused" logger="UnhandledError"
Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.494651 4926 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": dial tcp 38.102.83.248:6443: connect: connection refused
Nov 22 10:39:40 crc kubenswrapper[4926]: E1122 10:39:40.495259 4926 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Node: failed to list *v1.Node: Get \"https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0\": dial tcp 38.102.83.248:6443: connect: connection refused" logger="UnhandledError"
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.495060 4926 certificate_store.go:130] Loading cert/key pair from "/var/lib/kubelet/pki/kubelet-server-current.pem".
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.497492 4926 kubelet.go:854] "Not starting ClusterTrustBundle informer because we are in static kubelet mode"
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.499426 4926 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/portworx-volume"
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.499583 4926 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/empty-dir"
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.499691 4926 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/git-repo"
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.499793 4926 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/host-path"
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.499933 4926 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/nfs"
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.500042 4926 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/secret"
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.500142 4926 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/iscsi"
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.500270 4926 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/downward-api"
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.500379 4926 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/fc"
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.500480 4926 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/configmap"
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.500601 4926 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/projected"
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.500704 4926 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/local-volume"
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.502036 4926 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/csi"
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.502768 4926 server.go:1280] "Started kubelet"
Nov 22 10:39:40 crc systemd[1]: Started Kubernetes Kubelet.
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.505084 4926 server.go:163] "Starting to listen" address="0.0.0.0" port=10250
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.505212 4926 ratelimit.go:55] "Setting rate limiting for endpoint" service="podresources" qps=100 burstTokens=10
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.505849 4926 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.248:6443: connect: connection refused
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.506064 4926 server.go:236] "Starting to serve the podresources API" endpoint="unix:/var/lib/kubelet/pod-resources/kubelet.sock"
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.520053 4926 server.go:460] "Adding debug handlers to kubelet server"
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.520127 4926 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate rotation is enabled
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.520185 4926 fs_resource_analyzer.go:67] "Starting FS ResourceAnalyzer"
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.520922 4926 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-08 05:43:28.710036022 +0000 UTC
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.521003 4926 certificate_manager.go:356] kubernetes.io/kubelet-serving: Waiting 1123h3m48.18903967s for next certificate rotation
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.521814 4926 volume_manager.go:287] "The desired_state_of_world populator starts"
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.521844 4926 volume_manager.go:289] "Starting Kubelet Volume Manager"
Nov 22 10:39:40 crc kubenswrapper[4926]: E1122 10:39:40.521774 4926 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found"
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.522002 4926 desired_state_of_world_populator.go:146] "Desired state populator starts to run"
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.522364 4926 factory.go:219] Registration of the containerd container factory failed: unable to create containerd client: containerd: cannot unix dial containerd api service: dial unix /run/containerd/containerd.sock: connect: no such file or directory
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.522406 4926 factory.go:55] Registering systemd factory
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.522426 4926 factory.go:221] Registration of the systemd container factory successfully
Nov 22 10:39:40 crc kubenswrapper[4926]: E1122 10:39:40.520629 4926 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/default/events\": dial tcp 38.102.83.248:6443: connect: connection refused" event="&Event{ObjectMeta:{crc.187a4e0361dd3550 default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:Starting,Message:Starting kubelet.,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2025-11-22 10:39:40.502725968 +0000 UTC m=+0.804331285,LastTimestamp:2025-11-22 10:39:40.502725968 +0000 UTC m=+0.804331285,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}"
Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.523139 4926 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": dial tcp 38.102.83.248:6443: connect: connection refused
Nov 22 10:39:40 crc kubenswrapper[4926]: E1122 10:39:40.523248 4926 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get \"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0\": dial tcp 38.102.83.248:6443: connect: connection refused" logger="UnhandledError"
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.523342 4926 factory.go:153] Registering CRI-O factory
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.523368 4926 factory.go:221] Registration of the crio container factory successfully
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.523398 4926 factory.go:103] Registering Raw factory
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.523416 4926 manager.go:1196] Started watching for new ooms in manager
Nov 22 10:39:40 crc kubenswrapper[4926]: E1122 10:39:40.523795 4926 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.248:6443: connect: connection refused" interval="200ms"
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.524562 4926 manager.go:319] Starting recovery of all containers
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.534199 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert" seLinuxMountContext=""
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.534296 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7bb08738-c794-4ee8-9972-3a62ca171029" volumeName="kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist" seLinuxMountContext=""
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.534318 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1d611f23-29be-4491-8495-bee1670e935f" volumeName="kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content" seLinuxMountContext=""
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.534336 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert" seLinuxMountContext=""
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.534355 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" volumeName="kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert" seLinuxMountContext=""
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.534374 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle" seLinuxMountContext=""
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.534391 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6731426b-95fe-49ff-bb5f-40441049fde2" volumeName="kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh" seLinuxMountContext=""
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.534410 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6731426b-95fe-49ff-bb5f-40441049fde2" volumeName="kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls" seLinuxMountContext=""
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.534430 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls" seLinuxMountContext=""
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.534448 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6312bbd-5731-4ea0-a20f-81d5a57df44a" volumeName="kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert" seLinuxMountContext=""
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.534465 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a0128f3a-b052-44ed-a84e-c4c8aaf17c13" volumeName="kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls" seLinuxMountContext=""
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.534482 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" volumeName="kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782" seLinuxMountContext=""
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.534500 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" volumeName="kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config" seLinuxMountContext=""
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.534521 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert" seLinuxMountContext=""
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.534537 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config" seLinuxMountContext=""
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.534555 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="87cf06ed-a83f-41a7-828d-70653580a8cb" volumeName="kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume" seLinuxMountContext=""
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.534572 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="87cf06ed-a83f-41a7-828d-70653580a8cb" volumeName="kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx" seLinuxMountContext=""
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.534590 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides" seLinuxMountContext=""
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.534607 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config" seLinuxMountContext=""
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.534624 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config" seLinuxMountContext=""
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.534641 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn" seLinuxMountContext=""
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.534659 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d" volumeName="kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85" seLinuxMountContext=""
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.534697 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" volumeName="kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert" seLinuxMountContext=""
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.534711 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls" seLinuxMountContext=""
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.534723 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3b6479f0-333b-4a96-9adf-2099afdc2447" volumeName="kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr" seLinuxMountContext=""
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.534735 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca" seLinuxMountContext=""
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.534751 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5225d0e4-402f-4861-b410-819f433b1803" volumeName="kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7" seLinuxMountContext=""
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.534764 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs" seLinuxMountContext=""
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.534779 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls" seLinuxMountContext=""
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.534796 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token" seLinuxMountContext=""
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.534830 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" volumeName="kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates" seLinuxMountContext=""
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.534850 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca" seLinuxMountContext=""
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.534871 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3ab1a177-2de0-46d9-b765-d0d0649bb42e" volumeName="kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert" seLinuxMountContext=""
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.534913 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies" seLinuxMountContext=""
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.534930 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session" seLinuxMountContext=""
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.534946 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz" seLinuxMountContext=""
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.534962 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" volumeName="kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert" seLinuxMountContext=""
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.534980 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca" seLinuxMountContext=""
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.534998 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config" seLinuxMountContext=""
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.535014 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz" seLinuxMountContext=""
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.535095 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca" seLinuxMountContext=""
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.535116 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="d75a4c96-2883-4a0b-bab2-0fab2b6c0b49" volumeName="kubernetes.io/configmap/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-iptables-alerter-script" seLinuxMountContext=""
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.535163 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection" seLinuxMountContext=""
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.535182 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe579f8-e8a6-4643-bce5-a661393c4dde" volumeName="kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs" seLinuxMountContext=""
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.535200 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl" seLinuxMountContext=""
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.535216 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" volumeName="kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert" seLinuxMountContext=""
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.535266 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-env-overrides" seLinuxMountContext=""
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.535288 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5" seLinuxMountContext=""
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.535304 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template" seLinuxMountContext=""
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.535353 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config" seLinuxMountContext=""
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.535372 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="87cf06ed-a83f-41a7-828d-70653580a8cb" volumeName="kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls" seLinuxMountContext=""
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.535390 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs" seLinuxMountContext=""
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.535443 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit" seLinuxMountContext=""
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.535464 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf" seLinuxMountContext=""
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.535483 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5" seLinuxMountContext=""
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.535544 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" volumeName="kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config" seLinuxMountContext=""
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.535564 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52" seLinuxMountContext=""
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.535660 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/secret/ef543e1b-8068-4ea3-b32a-61027b32e95d-webhook-cert" seLinuxMountContext=""
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.535678 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b78653f-4ff9-4508-8672-245ed9b561e3" volumeName="kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access" seLinuxMountContext=""
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.535724 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5225d0e4-402f-4861-b410-819f433b1803" volumeName="kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities" seLinuxMountContext=""
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.535740 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe579f8-e8a6-4643-bce5-a661393c4dde" volumeName="kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token" seLinuxMountContext=""
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.535763 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle" seLinuxMountContext=""
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.535807 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="4bb40260-dbaa-4fb0-84df-5e680505d512" volumeName="kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh" seLinuxMountContext=""
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.535826 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert" seLinuxMountContext=""
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.535841 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert" seLinuxMountContext=""
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.535901 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="37a5e44f-9a88-4405-be8a-b645485e7312" volumeName="kubernetes.io/secret/37a5e44f-9a88-4405-be8a-b645485e7312-metrics-tls" seLinuxMountContext=""
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.536122 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca" seLinuxMountContext=""
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.536143 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets" seLinuxMountContext=""
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.536190 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client" seLinuxMountContext=""
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.536208 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1386a44e-36a2-460c-96d0-0359d2b6f0f5" volumeName="kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert" seLinuxMountContext=""
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.536233 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle" seLinuxMountContext=""
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.536277 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="20b0d48f-5fd6-431c-a545-e3c800c7b866" volumeName="kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds" seLinuxMountContext=""
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.536297 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" volumeName="kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert" seLinuxMountContext=""
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.536314 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca" seLinuxMountContext=""
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.536330 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert" seLinuxMountContext=""
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.536380 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="496e6271-fb68-4057-954e-a0d97a4afa3f" volumeName="kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert" seLinuxMountContext=""
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.536404 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle" seLinuxMountContext=""
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.536423 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert" seLinuxMountContext=""
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.536476 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" volumeName="kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content" seLinuxMountContext=""
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.536495 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" volumeName="kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics" seLinuxMountContext=""
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.536512 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="fda69060-fa79-4696-b1a6-7980f124bf7c" volumeName="kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config" seLinuxMountContext=""
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.536558 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle" seLinuxMountContext=""
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.536578 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7bb08738-c794-4ee8-9972-3a62ca171029" volumeName="kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb" seLinuxMountContext=""
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.536595 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a0128f3a-b052-44ed-a84e-c4c8aaf17c13" volumeName="kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m" seLinuxMountContext=""
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.536650 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" volumeName="kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities" seLinuxMountContext=""
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.536747 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf" seLinuxMountContext=""
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.536763 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" volumeName="kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert" seLinuxMountContext=""
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.536779 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5b88f790-22fa-440e-b583-365168c0b23d" volumeName="kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs" seLinuxMountContext=""
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.536832 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token" seLinuxMountContext=""
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.536853 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="01ab3dd5-8196-46d0-ad33-122e2ca51def" volumeName="kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j" seLinuxMountContext=""
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.536870 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca" seLinuxMountContext=""
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.536939 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1386a44e-36a2-460c-96d0-0359d2b6f0f5" volumeName="kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config" seLinuxMountContext=""
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.536959 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client" seLinuxMountContext=""
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.536976 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="25e176fe-21b4-4974-b1ed-c8b94f112a7f" volumeName="kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle" seLinuxMountContext=""
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.537023 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert" seLinuxMountContext=""
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.537069 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images" seLinuxMountContext=""
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.537112 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7539238d-5fe0-46ed-884e-1c3b566537ec" volumeName="kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c" seLinuxMountContext=""
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.537138 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b574797-001e-440a-8f4e-c0be86edad0f" volumeName="kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88" seLinuxMountContext=""
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.537154 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1d611f23-29be-4491-8495-bee1670e935f" volumeName="kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz" seLinuxMountContext=""
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.537196 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8" seLinuxMountContext=""
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.537307 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config" seLinuxMountContext=""
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.537332 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs" seLinuxMountContext=""
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.537348 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert" seLinuxMountContext=""
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.537414 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config" seLinuxMountContext=""
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.537599 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/projected/ef543e1b-8068-4ea3-b32a-61027b32e95d-kube-api-access-s2kz5" seLinuxMountContext=""
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.537625 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" volumeName="kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg" seLinuxMountContext=""
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.537670 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca" seLinuxMountContext=""
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.537688 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="fda69060-fa79-4696-b1a6-7980f124bf7c" volumeName="kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh" seLinuxMountContext=""
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.537704 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz" seLinuxMountContext=""
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.537722 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b574797-001e-440a-8f4e-c0be86edad0f" volumeName="kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls" seLinuxMountContext=""
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.537769 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b78653f-4ff9-4508-8672-245ed9b561e3" volumeName="kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca" seLinuxMountContext=""
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.537841 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs" seLinuxMountContext=""
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.537860 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle" seLinuxMountContext=""
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.538007 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7539238d-5fe0-46ed-884e-1c3b566537ec" volumeName="kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config" seLinuxMountContext=""
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.538133 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config" seLinuxMountContext=""
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.538183 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig" seLinuxMountContext=""
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.538203 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49ef4625-1d3a-4a9f-b595-c2433d32326d" volumeName="kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v" seLinuxMountContext=""
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.538273 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls" seLinuxMountContext=""
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.538371 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7bb08738-c794-4ee8-9972-3a62ca171029" volumeName="kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy" seLinuxMountContext=""
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.538426 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6312bbd-5731-4ea0-a20f-81d5a57df44a" volumeName="kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr" seLinuxMountContext=""
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.538475 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="e7e6199b-1264-4501-8953-767f51328d08" volumeName="kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert" seLinuxMountContext=""
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.538492 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert" seLinuxMountContext=""
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.538553 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca" seLinuxMountContext=""
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.538578 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="496e6271-fb68-4057-954e-a0d97a4afa3f" volumeName="kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config" seLinuxMountContext=""
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.538597 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="4bb40260-dbaa-4fb0-84df-5e680505d512" volumeName="kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config" seLinuxMountContext=""
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.538650 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="57a731c4-ef35-47a8-b875-bfb08a7f8011" volumeName="kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct" seLinuxMountContext=""
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.538667 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="96b93a3a-6083-4aea-8eab-fe1aa8245ad9" volumeName="kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls" seLinuxMountContext=""
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.538706 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="496e6271-fb68-4057-954e-a0d97a4afa3f" volumeName="kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access" seLinuxMountContext=""
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.538790 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d751cbb-f2e2-430d-9754-c882a5e924a5" volumeName="kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl" seLinuxMountContext=""
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.538830 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="e7e6199b-1264-4501-8953-767f51328d08" volumeName="kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config" seLinuxMountContext=""
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.538930 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca" seLinuxMountContext=""
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.538943 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" volumeName="kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd" seLinuxMountContext=""
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.538955 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies" seLinuxMountContext=""
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.539042 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv" seLinuxMountContext=""
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.539055 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" volumeName="kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca" seLinuxMountContext=""
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.539103 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config" seLinuxMountContext=""
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.539115 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" volumeName="kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf" seLinuxMountContext=""
Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.539182 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136"
volumeName="kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config" seLinuxMountContext="" Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.539195 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="d75a4c96-2883-4a0b-bab2-0fab2b6c0b49" volumeName="kubernetes.io/projected/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-kube-api-access-rczfb" seLinuxMountContext="" Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.539206 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk" seLinuxMountContext="" Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.539272 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client" seLinuxMountContext="" Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.539378 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b574797-001e-440a-8f4e-c0be86edad0f" volumeName="kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config" seLinuxMountContext="" Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.539393 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" volumeName="kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp" seLinuxMountContext="" Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.539446 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3ab1a177-2de0-46d9-b765-d0d0649bb42e" volumeName="kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj" seLinuxMountContext="" Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.539458 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7539238d-5fe0-46ed-884e-1c3b566537ec" volumeName="kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert" seLinuxMountContext="" Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.539469 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" volumeName="kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca" seLinuxMountContext="" Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.539530 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="57a731c4-ef35-47a8-b875-bfb08a7f8011" volumeName="kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content" seLinuxMountContext="" Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.539542 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb" seLinuxMountContext="" Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.539555 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="01ab3dd5-8196-46d0-ad33-122e2ca51def" 
volumeName="kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert" seLinuxMountContext="" Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.539667 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="20b0d48f-5fd6-431c-a545-e3c800c7b866" volumeName="kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert" seLinuxMountContext="" Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.539678 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" volumeName="kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8" seLinuxMountContext="" Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.539773 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error" seLinuxMountContext="" Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.539809 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca" seLinuxMountContext="" Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.539824 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="57a731c4-ef35-47a8-b875-bfb08a7f8011" volumeName="kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities" seLinuxMountContext="" Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.539836 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert" seLinuxMountContext="" Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.539848 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted" seLinuxMountContext="" Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.540000 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate" seLinuxMountContext="" Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.540065 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="fda69060-fa79-4696-b1a6-7980f124bf7c" volumeName="kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls" seLinuxMountContext="" Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.540077 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config" seLinuxMountContext="" Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.540111 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" 
volumeName="kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6" seLinuxMountContext="" Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.540146 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1386a44e-36a2-460c-96d0-0359d2b6f0f5" volumeName="kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access" seLinuxMountContext="" Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.540160 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls" seLinuxMountContext="" Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.540223 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="25e176fe-21b4-4974-b1ed-c8b94f112a7f" volumeName="kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv" seLinuxMountContext="" Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.540236 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="37a5e44f-9a88-4405-be8a-b645485e7312" volumeName="kubernetes.io/projected/37a5e44f-9a88-4405-be8a-b645485e7312-kube-api-access-rdwmf" seLinuxMountContext="" Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.540247 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data" seLinuxMountContext="" Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.540258 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config" seLinuxMountContext="" Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.540270 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates" seLinuxMountContext="" Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.540326 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" volumeName="kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh" seLinuxMountContext="" Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.540339 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bd23aa5c-e532-4e53-bccf-e79f130c5ae8" volumeName="kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2" seLinuxMountContext="" Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.540407 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-ovnkube-identity-cm" seLinuxMountContext="" Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.540419 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" 
podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert" seLinuxMountContext="" Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.540430 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b78653f-4ff9-4508-8672-245ed9b561e3" volumeName="kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert" seLinuxMountContext="" Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.540442 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp" seLinuxMountContext="" Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.540478 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles" seLinuxMountContext="" Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.540489 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config" seLinuxMountContext="" Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.540525 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1d611f23-29be-4491-8495-bee1670e935f" volumeName="kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities" seLinuxMountContext="" Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.540617 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="efdd0498-1daa-4136-9a4a-3b948c2293fc" volumeName="kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs" seLinuxMountContext="" Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.540631 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca" seLinuxMountContext="" Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.540744 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="44663579-783b-4372-86d6-acf235a62d72" volumeName="kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc" seLinuxMountContext="" Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.540757 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle" seLinuxMountContext="" Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.540768 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib" seLinuxMountContext="" Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.541213 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" 
podName="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" volumeName="kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4" seLinuxMountContext="" Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.541226 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls" seLinuxMountContext="" Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.541271 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7" seLinuxMountContext="" Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.541338 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe579f8-e8a6-4643-bce5-a661393c4dde" volumeName="kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp" seLinuxMountContext="" Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.541359 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert" seLinuxMountContext="" Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.541370 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="96b93a3a-6083-4aea-8eab-fe1aa8245ad9" volumeName="kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7" seLinuxMountContext="" Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.541428 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6312bbd-5731-4ea0-a20f-81d5a57df44a" volumeName="kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert" seLinuxMountContext="" Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.541442 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config" seLinuxMountContext="" Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.541497 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5b88f790-22fa-440e-b583-365168c0b23d" volumeName="kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn" seLinuxMountContext="" Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.541509 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7" seLinuxMountContext="" Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.541545 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca" seLinuxMountContext="" Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.541562 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" 
podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert" seLinuxMountContext="" Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.541575 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5225d0e4-402f-4861-b410-819f433b1803" volumeName="kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content" seLinuxMountContext="" Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.541701 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" seLinuxMountContext="" Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.543596 4926 reconstruct.go:144] "Volume is marked device as uncertain and added into the actual state" volumeName="kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" deviceMountPath="/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/globalmount" Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.543629 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token" seLinuxMountContext="" Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.543650 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config" seLinuxMountContext="" Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.543756 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides" seLinuxMountContext="" Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.543787 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth" seLinuxMountContext="" Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.543826 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="01ab3dd5-8196-46d0-ad33-122e2ca51def" volumeName="kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config" seLinuxMountContext="" Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.543837 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config" seLinuxMountContext="" Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.543849 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="25e176fe-21b4-4974-b1ed-c8b94f112a7f" volumeName="kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key" seLinuxMountContext="" Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.543860 4926 reconstruct.go:130] "Volume is marked as 
uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images" seLinuxMountContext="" Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.543872 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login" seLinuxMountContext="" Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.543903 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="4bb40260-dbaa-4fb0-84df-5e680505d512" volumeName="kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy" seLinuxMountContext="" Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.543921 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="e7e6199b-1264-4501-8953-767f51328d08" volumeName="kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access" seLinuxMountContext="" Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.543959 4926 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="efdd0498-1daa-4136-9a4a-3b948c2293fc" volumeName="kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt" seLinuxMountContext="" Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.543971 4926 reconstruct.go:97] "Volume reconstruction finished" Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.543980 4926 reconciler.go:26] "Reconciler: start to sync state" Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.556353 4926 manager.go:324] Recovery completed Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.571682 4926 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.573627 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.573685 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.573703 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.577425 4926 cpu_manager.go:225] "Starting CPU manager" policy="none" Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.577459 4926 cpu_manager.go:226] "Reconciling" reconcilePeriod="10s" Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.577481 4926 state_mem.go:36] "Initialized new in-memory state store" Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.577459 4926 kubelet_network_linux.go:50] "Initialized iptables rules." protocol="IPv4" Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.580520 4926 kubelet_network_linux.go:50] "Initialized iptables rules." 
protocol="IPv6" Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.580604 4926 status_manager.go:217] "Starting to sync pod status with apiserver" Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.580657 4926 kubelet.go:2335] "Starting kubelet main sync loop" Nov 22 10:39:40 crc kubenswrapper[4926]: E1122 10:39:40.580750 4926 kubelet.go:2359] "Skipping pod synchronization" err="[container runtime status check may not have completed yet, PLEG is not healthy: pleg has yet to be successful]" Nov 22 10:39:40 crc kubenswrapper[4926]: W1122 10:39:40.581550 4926 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": dial tcp 38.102.83.248:6443: connect: connection refused Nov 22 10:39:40 crc kubenswrapper[4926]: E1122 10:39:40.581608 4926 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.RuntimeClass: failed to list *v1.RuntimeClass: Get \"https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0\": dial tcp 38.102.83.248:6443: connect: connection refused" logger="UnhandledError" Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.597420 4926 policy_none.go:49] "None policy: Start" Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.598402 4926 memory_manager.go:170] "Starting memorymanager" policy="None" Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.598516 4926 state_mem.go:35] "Initializing new in-memory state store" Nov 22 10:39:40 crc kubenswrapper[4926]: E1122 10:39:40.622555 4926 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.653457 4926 manager.go:334] "Starting Device Plugin manager" Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.653726 4926 manager.go:513] "Failed to read data from checkpoint" checkpoint="kubelet_internal_checkpoint" err="checkpoint is not found" Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.653754 4926 server.go:79] "Starting device plugin registration server" Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.654402 4926 eviction_manager.go:189] "Eviction manager: starting control loop" Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.654436 4926 container_log_manager.go:189] "Initializing container log rotate workers" workers=1 monitorPeriod="10s" Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.654613 4926 plugin_watcher.go:51] "Plugin Watcher Start" path="/var/lib/kubelet/plugins_registry" Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.654715 4926 plugin_manager.go:116] "The desired_state_of_world populator (plugin watcher) starts" Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.654725 4926 plugin_manager.go:118] "Starting Kubelet Plugin Manager" Nov 22 10:39:40 crc kubenswrapper[4926]: E1122 10:39:40.661133 4926 eviction_manager.go:285] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found" Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.681741 4926 kubelet.go:2421] "SyncLoop ADD" source="file" pods=["openshift-machine-config-operator/kube-rbac-proxy-crio-crc","openshift-etcd/etcd-crc","openshift-kube-apiserver/kube-apiserver-crc","openshift-kube-controller-manager/kube-controller-manager-crc","openshift-kube-scheduler/openshift-kube-scheduler-crc"] Nov 22 10:39:40 crc kubenswrapper[4926]: 
I1122 10:39:40.681817 4926 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.682744 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.682777 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.682795 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.682950 4926 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.683451 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.683473 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.683481 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.684350 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.684393 4926 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.684454 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-etcd/etcd-crc" Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.684482 4926 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.684396 4926 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.685441 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.685458 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.685487 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.685508 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.685527 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.685535 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.685560 4926 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.685513 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.685584 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.685594 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.685728 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.685754 4926 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.686119 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.686201 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.686213 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.686407 4926 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.686629 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.686725 4926 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.687600 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.687626 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.687653 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.687666 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.687693 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.687710 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.687787 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.687806 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.687819 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.687976 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.688020 4926 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.688773 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.688792 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.688799 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:40 crc kubenswrapper[4926]: E1122 10:39:40.724518 4926 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.248:6443: connect: connection refused" interval="400ms" Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.747151 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-resource-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.747282 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"data-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-data-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.747429 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-cert-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.747479 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-cert-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.747513 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.747539 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.747558 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: 
\"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-cert-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.747574 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-var-lib-kubelet\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.747600 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"static-pod-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-static-pod-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.747617 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-log-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.747634 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-resource-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.747653 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-kube\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-etc-kube\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.747672 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"usr-local-bin\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-usr-local-bin\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.747701 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.747717 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-resource-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.755101 4926 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 
10:39:40.756134 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.756180 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.756195 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.756220 4926 kubelet_node_status.go:76] "Attempting to register node" node="crc" Nov 22 10:39:40 crc kubenswrapper[4926]: E1122 10:39:40.756577 4926 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.248:6443: connect: connection refused" node="crc" Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.849356 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-resource-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.849648 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-resource-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.849772 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"data-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-data-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.849832 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-cert-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.849864 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-cert-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.849935 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.849966 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.850057 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"data-dir\" (UniqueName: 
\"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-data-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.850081 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-cert-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.850144 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-cert-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.850203 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.850246 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.850238 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-cert-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.850361 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-cert-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.850365 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-var-lib-kubelet\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.850481 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-var-lib-kubelet\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.850395 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"static-pod-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-static-pod-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 
10:39:40.850666 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"static-pod-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-static-pod-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.850559 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-log-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.850798 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-log-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.850841 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-resource-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.850989 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-resource-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.851065 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-kube\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-etc-kube\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.851144 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-kube\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-etc-kube\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.851215 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"usr-local-bin\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-usr-local-bin\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.851435 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.851521 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"usr-local-bin\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-usr-local-bin\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 22 10:39:40 crc 
kubenswrapper[4926]: I1122 10:39:40.851550 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.851618 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-resource-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.851817 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-resource-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.957180 4926 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.958925 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.958967 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.958984 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:40 crc kubenswrapper[4926]: I1122 10:39:40.959017 4926 kubelet_node_status.go:76] "Attempting to register node" node="crc" Nov 22 10:39:40 crc kubenswrapper[4926]: E1122 10:39:40.959601 4926 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.248:6443: connect: connection refused" node="crc" Nov 22 10:39:41 crc kubenswrapper[4926]: I1122 10:39:41.020184 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 22 10:39:41 crc kubenswrapper[4926]: I1122 10:39:41.036466 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd/etcd-crc" Nov 22 10:39:41 crc kubenswrapper[4926]: I1122 10:39:41.044012 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 22 10:39:41 crc kubenswrapper[4926]: I1122 10:39:41.060148 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 22 10:39:41 crc kubenswrapper[4926]: I1122 10:39:41.064524 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 22 10:39:41 crc kubenswrapper[4926]: W1122 10:39:41.071980 4926 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd1b160f5dda77d281dd8e69ec8d817f9.slice/crio-4d65ad7eccd2166f5cad506fbb9289c3f4275e293810a2e45469b930b0952e15 WatchSource:0}: Error finding container 4d65ad7eccd2166f5cad506fbb9289c3f4275e293810a2e45469b930b0952e15: Status 404 returned error can't find the container with id 4d65ad7eccd2166f5cad506fbb9289c3f4275e293810a2e45469b930b0952e15 Nov 22 10:39:41 crc kubenswrapper[4926]: W1122 10:39:41.078436 4926 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod2139d3e2895fc6797b9c76a1b4c9886d.slice/crio-2626b2cf47c7e439bdc53655f5977b8cce03860b23132223552416d4ca93a087 WatchSource:0}: Error finding container 2626b2cf47c7e439bdc53655f5977b8cce03860b23132223552416d4ca93a087: Status 404 returned error can't find the container with id 2626b2cf47c7e439bdc53655f5977b8cce03860b23132223552416d4ca93a087 Nov 22 10:39:41 crc kubenswrapper[4926]: W1122 10:39:41.090160 4926 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf4b27818a5e8e43d0dc095d08835c792.slice/crio-de694e564e1e2647e7cf29f168cb7cf1acc3017b79979019258f219d3b7bc2ba WatchSource:0}: Error finding container de694e564e1e2647e7cf29f168cb7cf1acc3017b79979019258f219d3b7bc2ba: Status 404 returned error can't find the container with id de694e564e1e2647e7cf29f168cb7cf1acc3017b79979019258f219d3b7bc2ba Nov 22 10:39:41 crc kubenswrapper[4926]: W1122 10:39:41.099696 4926 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3dcd261975c3d6b9a6ad6367fd4facd3.slice/crio-a9364e5c0a6d3234474fad0507fa66e0c8c4ec61cf13d84b3c16ce8f53ce246a WatchSource:0}: Error finding container a9364e5c0a6d3234474fad0507fa66e0c8c4ec61cf13d84b3c16ce8f53ce246a: Status 404 returned error can't find the container with id a9364e5c0a6d3234474fad0507fa66e0c8c4ec61cf13d84b3c16ce8f53ce246a Nov 22 10:39:41 crc kubenswrapper[4926]: E1122 10:39:41.126004 4926 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.248:6443: connect: connection refused" interval="800ms" Nov 22 10:39:41 crc kubenswrapper[4926]: I1122 10:39:41.360130 4926 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 22 10:39:41 crc kubenswrapper[4926]: I1122 10:39:41.361949 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:41 crc kubenswrapper[4926]: I1122 10:39:41.362022 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:41 crc kubenswrapper[4926]: I1122 10:39:41.362035 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:41 crc kubenswrapper[4926]: I1122 10:39:41.362075 4926 kubelet_node_status.go:76] "Attempting to register node" node="crc" Nov 22 10:39:41 crc kubenswrapper[4926]: E1122 10:39:41.362682 4926 kubelet_node_status.go:99] "Unable to register node with API server" err="Post 
\"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.248:6443: connect: connection refused" node="crc" Nov 22 10:39:41 crc kubenswrapper[4926]: W1122 10:39:41.405135 4926 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": dial tcp 38.102.83.248:6443: connect: connection refused Nov 22 10:39:41 crc kubenswrapper[4926]: E1122 10:39:41.405286 4926 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get \"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0\": dial tcp 38.102.83.248:6443: connect: connection refused" logger="UnhandledError" Nov 22 10:39:41 crc kubenswrapper[4926]: I1122 10:39:41.507776 4926 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.248:6443: connect: connection refused Nov 22 10:39:41 crc kubenswrapper[4926]: I1122 10:39:41.588156 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"0fd90ba5b8831cde935e517e49296e4a9d041ff1a47b2d26a6c643ccca6507af"} Nov 22 10:39:41 crc kubenswrapper[4926]: I1122 10:39:41.592088 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"de694e564e1e2647e7cf29f168cb7cf1acc3017b79979019258f219d3b7bc2ba"} Nov 22 10:39:41 crc kubenswrapper[4926]: I1122 10:39:41.593018 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"2626b2cf47c7e439bdc53655f5977b8cce03860b23132223552416d4ca93a087"} Nov 22 10:39:41 crc kubenswrapper[4926]: I1122 10:39:41.594144 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" event={"ID":"d1b160f5dda77d281dd8e69ec8d817f9","Type":"ContainerStarted","Data":"4d65ad7eccd2166f5cad506fbb9289c3f4275e293810a2e45469b930b0952e15"} Nov 22 10:39:41 crc kubenswrapper[4926]: I1122 10:39:41.595219 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"a9364e5c0a6d3234474fad0507fa66e0c8c4ec61cf13d84b3c16ce8f53ce246a"} Nov 22 10:39:41 crc kubenswrapper[4926]: W1122 10:39:41.850162 4926 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": dial tcp 38.102.83.248:6443: connect: connection refused Nov 22 10:39:41 crc kubenswrapper[4926]: E1122 10:39:41.850345 4926 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.RuntimeClass: failed to list *v1.RuntimeClass: Get \"https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0\": dial tcp 38.102.83.248:6443: connect: connection refused" logger="UnhandledError" Nov 22 10:39:41 crc kubenswrapper[4926]: W1122 10:39:41.866272 4926 
reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0": dial tcp 38.102.83.248:6443: connect: connection refused Nov 22 10:39:41 crc kubenswrapper[4926]: E1122 10:39:41.866455 4926 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Service: failed to list *v1.Service: Get \"https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0\": dial tcp 38.102.83.248:6443: connect: connection refused" logger="UnhandledError" Nov 22 10:39:41 crc kubenswrapper[4926]: E1122 10:39:41.927062 4926 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.248:6443: connect: connection refused" interval="1.6s" Nov 22 10:39:41 crc kubenswrapper[4926]: W1122 10:39:41.990923 4926 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": dial tcp 38.102.83.248:6443: connect: connection refused Nov 22 10:39:41 crc kubenswrapper[4926]: E1122 10:39:41.991023 4926 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Node: failed to list *v1.Node: Get \"https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0\": dial tcp 38.102.83.248:6443: connect: connection refused" logger="UnhandledError" Nov 22 10:39:42 crc kubenswrapper[4926]: I1122 10:39:42.163962 4926 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 22 10:39:42 crc kubenswrapper[4926]: I1122 10:39:42.165399 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:42 crc kubenswrapper[4926]: I1122 10:39:42.165465 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:42 crc kubenswrapper[4926]: I1122 10:39:42.165506 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:42 crc kubenswrapper[4926]: I1122 10:39:42.165558 4926 kubelet_node_status.go:76] "Attempting to register node" node="crc" Nov 22 10:39:42 crc kubenswrapper[4926]: E1122 10:39:42.166905 4926 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.248:6443: connect: connection refused" node="crc" Nov 22 10:39:42 crc kubenswrapper[4926]: I1122 10:39:42.507628 4926 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.248:6443: connect: connection refused Nov 22 10:39:42 crc kubenswrapper[4926]: I1122 10:39:42.599984 4926 generic.go:334] "Generic (PLEG): container finished" podID="3dcd261975c3d6b9a6ad6367fd4facd3" containerID="6ec673ef3eadee493ad9112c2b920087ff03986cb55933071e6be31f987ff8b1" exitCode=0 Nov 22 10:39:42 crc kubenswrapper[4926]: I1122 10:39:42.600135 4926 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 22 
10:39:42 crc kubenswrapper[4926]: I1122 10:39:42.600063 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerDied","Data":"6ec673ef3eadee493ad9112c2b920087ff03986cb55933071e6be31f987ff8b1"} Nov 22 10:39:42 crc kubenswrapper[4926]: I1122 10:39:42.601123 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:42 crc kubenswrapper[4926]: I1122 10:39:42.601163 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:42 crc kubenswrapper[4926]: I1122 10:39:42.601172 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:42 crc kubenswrapper[4926]: I1122 10:39:42.604363 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"2ff433d4efb9f77a47f79a39aa609b09a40a642f554e19b8fce0c84ad96a5e34"} Nov 22 10:39:42 crc kubenswrapper[4926]: I1122 10:39:42.604423 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"96a716433f83f8041fc5f29383937a1b49d73701286739632a5c07a7e4a42077"} Nov 22 10:39:42 crc kubenswrapper[4926]: I1122 10:39:42.604446 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"5b4679473c2c11fecbd8496e1cbeb15b3a2e3fff5e5b5f127e0d1f1d7b3e5b70"} Nov 22 10:39:42 crc kubenswrapper[4926]: I1122 10:39:42.604459 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"8066b411b165bb6f3aa9fac7c4f7d5687e58fba56ff2afd14c7a351ba3877483"} Nov 22 10:39:42 crc kubenswrapper[4926]: I1122 10:39:42.604428 4926 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 22 10:39:42 crc kubenswrapper[4926]: I1122 10:39:42.605393 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:42 crc kubenswrapper[4926]: I1122 10:39:42.605429 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:42 crc kubenswrapper[4926]: I1122 10:39:42.605442 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:42 crc kubenswrapper[4926]: I1122 10:39:42.607507 4926 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="e4fdad471fe7d7332fa8c1fc1e4779b934b140f9c808e0af44f05808c56b3e42" exitCode=0 Nov 22 10:39:42 crc kubenswrapper[4926]: I1122 10:39:42.607577 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerDied","Data":"e4fdad471fe7d7332fa8c1fc1e4779b934b140f9c808e0af44f05808c56b3e42"} Nov 22 10:39:42 crc kubenswrapper[4926]: I1122 10:39:42.607756 4926 kubelet_node_status.go:401] "Setting node annotation to enable volume 
controller attach/detach" Nov 22 10:39:42 crc kubenswrapper[4926]: I1122 10:39:42.609199 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:42 crc kubenswrapper[4926]: I1122 10:39:42.609227 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:42 crc kubenswrapper[4926]: I1122 10:39:42.609237 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:42 crc kubenswrapper[4926]: I1122 10:39:42.609866 4926 generic.go:334] "Generic (PLEG): container finished" podID="2139d3e2895fc6797b9c76a1b4c9886d" containerID="2f4c96d7281eda14ad3971ee27f9f0f401d12ba8b72e32b65bd9d4935182f980" exitCode=0 Nov 22 10:39:42 crc kubenswrapper[4926]: I1122 10:39:42.609934 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerDied","Data":"2f4c96d7281eda14ad3971ee27f9f0f401d12ba8b72e32b65bd9d4935182f980"} Nov 22 10:39:42 crc kubenswrapper[4926]: I1122 10:39:42.609956 4926 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 22 10:39:42 crc kubenswrapper[4926]: I1122 10:39:42.610639 4926 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 22 10:39:42 crc kubenswrapper[4926]: I1122 10:39:42.610925 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:42 crc kubenswrapper[4926]: I1122 10:39:42.610962 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:42 crc kubenswrapper[4926]: I1122 10:39:42.610974 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:42 crc kubenswrapper[4926]: I1122 10:39:42.611662 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:42 crc kubenswrapper[4926]: I1122 10:39:42.611722 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:42 crc kubenswrapper[4926]: I1122 10:39:42.611738 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:42 crc kubenswrapper[4926]: I1122 10:39:42.612643 4926 generic.go:334] "Generic (PLEG): container finished" podID="d1b160f5dda77d281dd8e69ec8d817f9" containerID="f4752de8b3ce1c860578c5b60a69bdb23fcbf6d14241c87c1b719c047d81af9c" exitCode=0 Nov 22 10:39:42 crc kubenswrapper[4926]: I1122 10:39:42.612680 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" event={"ID":"d1b160f5dda77d281dd8e69ec8d817f9","Type":"ContainerDied","Data":"f4752de8b3ce1c860578c5b60a69bdb23fcbf6d14241c87c1b719c047d81af9c"} Nov 22 10:39:42 crc kubenswrapper[4926]: I1122 10:39:42.612812 4926 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 22 10:39:42 crc kubenswrapper[4926]: I1122 10:39:42.613766 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:42 crc kubenswrapper[4926]: I1122 10:39:42.613790 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 
10:39:42 crc kubenswrapper[4926]: I1122 10:39:42.613800 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:43 crc kubenswrapper[4926]: W1122 10:39:43.286802 4926 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": dial tcp 38.102.83.248:6443: connect: connection refused Nov 22 10:39:43 crc kubenswrapper[4926]: E1122 10:39:43.287225 4926 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get \"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0\": dial tcp 38.102.83.248:6443: connect: connection refused" logger="UnhandledError" Nov 22 10:39:43 crc kubenswrapper[4926]: I1122 10:39:43.506775 4926 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.248:6443: connect: connection refused Nov 22 10:39:43 crc kubenswrapper[4926]: E1122 10:39:43.528098 4926 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.248:6443: connect: connection refused" interval="3.2s" Nov 22 10:39:43 crc kubenswrapper[4926]: I1122 10:39:43.622912 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"59001d5969924d417da69f7b385e8ee904bb5289914c6f4a58156a80f2553abd"} Nov 22 10:39:43 crc kubenswrapper[4926]: I1122 10:39:43.622965 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"d8bdb9d01c359b36d5ea1517197ab1db0746267b371b5aaf6ae9af7157af6f76"} Nov 22 10:39:43 crc kubenswrapper[4926]: I1122 10:39:43.622999 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"f92968d6a164ebcee515ead5d8704ab04430b65f29cec21c10cecf2b99079737"} Nov 22 10:39:43 crc kubenswrapper[4926]: I1122 10:39:43.623021 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"bdc617bc3d316106b3e66df8521ae9165a90df081b92c4a6c167895006a667a4"} Nov 22 10:39:43 crc kubenswrapper[4926]: I1122 10:39:43.624764 4926 generic.go:334] "Generic (PLEG): container finished" podID="2139d3e2895fc6797b9c76a1b4c9886d" containerID="ebad6d17a869f5f58a7c43300ceace96025b58ca9621cb6bc23b45d69fe471b4" exitCode=0 Nov 22 10:39:43 crc kubenswrapper[4926]: I1122 10:39:43.624852 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerDied","Data":"ebad6d17a869f5f58a7c43300ceace96025b58ca9621cb6bc23b45d69fe471b4"} Nov 22 10:39:43 crc kubenswrapper[4926]: I1122 10:39:43.624858 4926 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 22 10:39:43 crc kubenswrapper[4926]: I1122 10:39:43.626284 
4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:43 crc kubenswrapper[4926]: I1122 10:39:43.626326 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:43 crc kubenswrapper[4926]: I1122 10:39:43.626338 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:43 crc kubenswrapper[4926]: I1122 10:39:43.628513 4926 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 22 10:39:43 crc kubenswrapper[4926]: I1122 10:39:43.628519 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" event={"ID":"d1b160f5dda77d281dd8e69ec8d817f9","Type":"ContainerStarted","Data":"6bf989ac1f115799fd40c7d673163ebf9361ac54f90ae23d92bbfb4d07d14bb0"} Nov 22 10:39:43 crc kubenswrapper[4926]: I1122 10:39:43.630190 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:43 crc kubenswrapper[4926]: I1122 10:39:43.630470 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:43 crc kubenswrapper[4926]: I1122 10:39:43.630743 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"0b5507d01d4acb2f96c9c8b4978d67033fe6e9f01a035089649879219c4e9902"} Nov 22 10:39:43 crc kubenswrapper[4926]: I1122 10:39:43.630817 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"a34606ee4262c2a39b7e799d969d8a76024895265fba033ffdbbd036c9e80fe3"} Nov 22 10:39:43 crc kubenswrapper[4926]: I1122 10:39:43.630726 4926 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 22 10:39:43 crc kubenswrapper[4926]: I1122 10:39:43.630842 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"cb848b9bd67cb4ffab830dcdca81d4ddcc4fdbdda65202dbfd4632f8fc0bdfc5"} Nov 22 10:39:43 crc kubenswrapper[4926]: I1122 10:39:43.630741 4926 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 22 10:39:43 crc kubenswrapper[4926]: I1122 10:39:43.630932 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:43 crc kubenswrapper[4926]: I1122 10:39:43.631898 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:43 crc kubenswrapper[4926]: I1122 10:39:43.631928 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:43 crc kubenswrapper[4926]: I1122 10:39:43.631940 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:43 crc kubenswrapper[4926]: I1122 10:39:43.632172 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:43 crc kubenswrapper[4926]: I1122 10:39:43.632222 4926 
kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:43 crc kubenswrapper[4926]: I1122 10:39:43.632241 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:43 crc kubenswrapper[4926]: I1122 10:39:43.767865 4926 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 22 10:39:43 crc kubenswrapper[4926]: I1122 10:39:43.769165 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:43 crc kubenswrapper[4926]: I1122 10:39:43.769229 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:43 crc kubenswrapper[4926]: I1122 10:39:43.769244 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:43 crc kubenswrapper[4926]: I1122 10:39:43.769280 4926 kubelet_node_status.go:76] "Attempting to register node" node="crc" Nov 22 10:39:43 crc kubenswrapper[4926]: E1122 10:39:43.769878 4926 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.248:6443: connect: connection refused" node="crc" Nov 22 10:39:43 crc kubenswrapper[4926]: W1122 10:39:43.817217 4926 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0": dial tcp 38.102.83.248:6443: connect: connection refused Nov 22 10:39:43 crc kubenswrapper[4926]: E1122 10:39:43.817325 4926 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Service: failed to list *v1.Service: Get \"https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0\": dial tcp 38.102.83.248:6443: connect: connection refused" logger="UnhandledError" Nov 22 10:39:44 crc kubenswrapper[4926]: W1122 10:39:44.447230 4926 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": dial tcp 38.102.83.248:6443: connect: connection refused Nov 22 10:39:44 crc kubenswrapper[4926]: E1122 10:39:44.447340 4926 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Node: failed to list *v1.Node: Get \"https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0\": dial tcp 38.102.83.248:6443: connect: connection refused" logger="UnhandledError" Nov 22 10:39:44 crc kubenswrapper[4926]: I1122 10:39:44.507680 4926 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.248:6443: connect: connection refused Nov 22 10:39:44 crc kubenswrapper[4926]: I1122 10:39:44.638639 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"a72c3a21a999d8cd54626c67ce5ee39d420aea38febb618c2cd1dd4aa3a63624"} Nov 22 10:39:44 crc kubenswrapper[4926]: I1122 10:39:44.638786 4926 
kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 22 10:39:44 crc kubenswrapper[4926]: I1122 10:39:44.640031 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:44 crc kubenswrapper[4926]: I1122 10:39:44.640092 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:44 crc kubenswrapper[4926]: I1122 10:39:44.640143 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:44 crc kubenswrapper[4926]: I1122 10:39:44.641704 4926 generic.go:334] "Generic (PLEG): container finished" podID="2139d3e2895fc6797b9c76a1b4c9886d" containerID="6075bb148352742b787b1e8df58fe76d06db89303a7c297d07446148532e92cb" exitCode=0 Nov 22 10:39:44 crc kubenswrapper[4926]: I1122 10:39:44.641754 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerDied","Data":"6075bb148352742b787b1e8df58fe76d06db89303a7c297d07446148532e92cb"} Nov 22 10:39:44 crc kubenswrapper[4926]: I1122 10:39:44.641790 4926 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 22 10:39:44 crc kubenswrapper[4926]: I1122 10:39:44.641819 4926 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 22 10:39:44 crc kubenswrapper[4926]: I1122 10:39:44.641922 4926 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 22 10:39:44 crc kubenswrapper[4926]: I1122 10:39:44.641923 4926 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 22 10:39:44 crc kubenswrapper[4926]: I1122 10:39:44.642453 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:44 crc kubenswrapper[4926]: I1122 10:39:44.642483 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:44 crc kubenswrapper[4926]: I1122 10:39:44.642493 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:44 crc kubenswrapper[4926]: I1122 10:39:44.643007 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:44 crc kubenswrapper[4926]: I1122 10:39:44.643064 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:44 crc kubenswrapper[4926]: I1122 10:39:44.643084 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:44 crc kubenswrapper[4926]: I1122 10:39:44.643476 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:44 crc kubenswrapper[4926]: I1122 10:39:44.643496 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:44 crc kubenswrapper[4926]: I1122 10:39:44.643505 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:44 crc kubenswrapper[4926]: I1122 10:39:44.697642 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 22 10:39:45 crc 
kubenswrapper[4926]: I1122 10:39:45.070987 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 22 10:39:45 crc kubenswrapper[4926]: I1122 10:39:45.071236 4926 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 22 10:39:45 crc kubenswrapper[4926]: I1122 10:39:45.072413 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:45 crc kubenswrapper[4926]: I1122 10:39:45.072462 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:45 crc kubenswrapper[4926]: I1122 10:39:45.072473 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:45 crc kubenswrapper[4926]: I1122 10:39:45.648036 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 22 10:39:45 crc kubenswrapper[4926]: I1122 10:39:45.650940 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"29acb771444281287208f7c5aecfe78ed6e70fe2940be31d423c011878bee6c4"} Nov 22 10:39:45 crc kubenswrapper[4926]: I1122 10:39:45.651008 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"adceb29cfdba4f70773eb38f81b2291080a8a25e6d6ffe7fbbb037118e0ac29f"} Nov 22 10:39:45 crc kubenswrapper[4926]: I1122 10:39:45.651033 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"b68f6c08d1ac65e51054d798dcb1e6affbf5f114ea3848b544d3093168e44150"} Nov 22 10:39:45 crc kubenswrapper[4926]: I1122 10:39:45.651044 4926 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 22 10:39:45 crc kubenswrapper[4926]: I1122 10:39:45.651052 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"ed65ad0463b5aa72d13756891af20691bff705a94c67a55d45f71af2a88b01e7"} Nov 22 10:39:45 crc kubenswrapper[4926]: I1122 10:39:45.651129 4926 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 22 10:39:45 crc kubenswrapper[4926]: I1122 10:39:45.651196 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 22 10:39:45 crc kubenswrapper[4926]: I1122 10:39:45.652557 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:45 crc kubenswrapper[4926]: I1122 10:39:45.652626 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:45 crc kubenswrapper[4926]: I1122 10:39:45.652651 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:45 crc kubenswrapper[4926]: I1122 10:39:45.653805 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:45 crc kubenswrapper[4926]: I1122 10:39:45.653853 4926 kubelet_node_status.go:724] "Recording 
event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:45 crc kubenswrapper[4926]: I1122 10:39:45.653873 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:46 crc kubenswrapper[4926]: I1122 10:39:46.403250 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 22 10:39:46 crc kubenswrapper[4926]: I1122 10:39:46.660225 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"c37c77ac89f0469ff68b3d5dffc2af7d7b6997b8975abe244e302baf0f5bbacc"} Nov 22 10:39:46 crc kubenswrapper[4926]: I1122 10:39:46.660311 4926 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 22 10:39:46 crc kubenswrapper[4926]: I1122 10:39:46.660350 4926 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 22 10:39:46 crc kubenswrapper[4926]: I1122 10:39:46.661544 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:46 crc kubenswrapper[4926]: I1122 10:39:46.661604 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:46 crc kubenswrapper[4926]: I1122 10:39:46.661623 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:46 crc kubenswrapper[4926]: I1122 10:39:46.661799 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:46 crc kubenswrapper[4926]: I1122 10:39:46.661827 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:46 crc kubenswrapper[4926]: I1122 10:39:46.661844 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:46 crc kubenswrapper[4926]: I1122 10:39:46.942376 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 22 10:39:46 crc kubenswrapper[4926]: I1122 10:39:46.942610 4926 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 22 10:39:46 crc kubenswrapper[4926]: I1122 10:39:46.944649 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:46 crc kubenswrapper[4926]: I1122 10:39:46.944712 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:46 crc kubenswrapper[4926]: I1122 10:39:46.944735 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:46 crc kubenswrapper[4926]: I1122 10:39:46.951464 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 22 10:39:46 crc kubenswrapper[4926]: I1122 10:39:46.971062 4926 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 22 10:39:46 crc kubenswrapper[4926]: I1122 10:39:46.972662 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:46 crc kubenswrapper[4926]: 
I1122 10:39:46.972711 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:46 crc kubenswrapper[4926]: I1122 10:39:46.972736 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:46 crc kubenswrapper[4926]: I1122 10:39:46.972775 4926 kubelet_node_status.go:76] "Attempting to register node" node="crc" Nov 22 10:39:47 crc kubenswrapper[4926]: I1122 10:39:47.662639 4926 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 22 10:39:47 crc kubenswrapper[4926]: I1122 10:39:47.662677 4926 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 22 10:39:47 crc kubenswrapper[4926]: I1122 10:39:47.662690 4926 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 22 10:39:47 crc kubenswrapper[4926]: I1122 10:39:47.664446 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:47 crc kubenswrapper[4926]: I1122 10:39:47.664497 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:47 crc kubenswrapper[4926]: I1122 10:39:47.664514 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:47 crc kubenswrapper[4926]: I1122 10:39:47.665122 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:47 crc kubenswrapper[4926]: I1122 10:39:47.665181 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:47 crc kubenswrapper[4926]: I1122 10:39:47.665199 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:47 crc kubenswrapper[4926]: I1122 10:39:47.665256 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:47 crc kubenswrapper[4926]: I1122 10:39:47.665347 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:47 crc kubenswrapper[4926]: I1122 10:39:47.665367 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:48 crc kubenswrapper[4926]: I1122 10:39:48.071327 4926 patch_prober.go:28] interesting pod/kube-controller-manager-crc container/cluster-policy-controller namespace/openshift-kube-controller-manager: Startup probe status=failure output="Get \"https://192.168.126.11:10357/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body= Nov 22 10:39:48 crc kubenswrapper[4926]: I1122 10:39:48.071469 4926 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="f614b9022728cf315e60c057852e563e" containerName="cluster-policy-controller" probeResult="failure" output="Get \"https://192.168.126.11:10357/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 22 10:39:48 crc kubenswrapper[4926]: I1122 10:39:48.093645 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 22 10:39:48 crc kubenswrapper[4926]: I1122 10:39:48.665604 
4926 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 22 10:39:48 crc kubenswrapper[4926]: I1122 10:39:48.665676 4926 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 22 10:39:48 crc kubenswrapper[4926]: I1122 10:39:48.667051 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:48 crc kubenswrapper[4926]: I1122 10:39:48.667113 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:48 crc kubenswrapper[4926]: I1122 10:39:48.667133 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:49 crc kubenswrapper[4926]: I1122 10:39:49.747969 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 22 10:39:49 crc kubenswrapper[4926]: I1122 10:39:49.748245 4926 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 22 10:39:49 crc kubenswrapper[4926]: I1122 10:39:49.749817 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:49 crc kubenswrapper[4926]: I1122 10:39:49.749867 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:49 crc kubenswrapper[4926]: I1122 10:39:49.749917 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:50 crc kubenswrapper[4926]: I1122 10:39:50.451846 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-etcd/etcd-crc" Nov 22 10:39:50 crc kubenswrapper[4926]: I1122 10:39:50.452087 4926 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 22 10:39:50 crc kubenswrapper[4926]: I1122 10:39:50.453386 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:50 crc kubenswrapper[4926]: I1122 10:39:50.453464 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:50 crc kubenswrapper[4926]: I1122 10:39:50.453498 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:50 crc kubenswrapper[4926]: E1122 10:39:50.661251 4926 eviction_manager.go:285] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found" Nov 22 10:39:53 crc kubenswrapper[4926]: I1122 10:39:53.292196 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-etcd/etcd-crc" Nov 22 10:39:53 crc kubenswrapper[4926]: I1122 10:39:53.292459 4926 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 22 10:39:53 crc kubenswrapper[4926]: I1122 10:39:53.294128 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:53 crc kubenswrapper[4926]: I1122 10:39:53.294217 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:53 crc kubenswrapper[4926]: I1122 10:39:53.294240 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:54 crc 
kubenswrapper[4926]: W1122 10:39:54.687392 4926 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": net/http: TLS handshake timeout Nov 22 10:39:54 crc kubenswrapper[4926]: I1122 10:39:54.687602 4926 trace.go:236] Trace[1199332684]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (22-Nov-2025 10:39:44.685) (total time: 10001ms): Nov 22 10:39:54 crc kubenswrapper[4926]: Trace[1199332684]: ---"Objects listed" error:Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": net/http: TLS handshake timeout 10001ms (10:39:54.687) Nov 22 10:39:54 crc kubenswrapper[4926]: Trace[1199332684]: [10.001923711s] [10.001923711s] END Nov 22 10:39:54 crc kubenswrapper[4926]: E1122 10:39:54.687646 4926 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.RuntimeClass: failed to list *v1.RuntimeClass: Get \"https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0\": net/http: TLS handshake timeout" logger="UnhandledError" Nov 22 10:39:54 crc kubenswrapper[4926]: I1122 10:39:54.836390 4926 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver-check-endpoints namespace/openshift-kube-apiserver: Readiness probe status=failure output="Get \"https://192.168.126.11:17697/healthz\": read tcp 192.168.126.11:34348->192.168.126.11:17697: read: connection reset by peer" start-of-body= Nov 22 10:39:54 crc kubenswrapper[4926]: I1122 10:39:54.836455 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" probeResult="failure" output="Get \"https://192.168.126.11:17697/healthz\": read tcp 192.168.126.11:34348->192.168.126.11:17697: read: connection reset by peer" Nov 22 10:39:54 crc kubenswrapper[4926]: I1122 10:39:54.863350 4926 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver namespace/openshift-kube-apiserver: Startup probe status=failure output="HTTP probe failed with statuscode: 403" start-of-body={"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"forbidden: User \"system:anonymous\" cannot get path \"/livez\"","reason":"Forbidden","details":{},"code":403} Nov 22 10:39:54 crc kubenswrapper[4926]: I1122 10:39:54.863406 4926 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 403" Nov 22 10:39:54 crc kubenswrapper[4926]: I1122 10:39:54.873474 4926 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver namespace/openshift-kube-apiserver: Startup probe status=failure output="HTTP probe failed with statuscode: 403" start-of-body={"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"forbidden: User \"system:anonymous\" cannot get path \"/livez\"","reason":"Forbidden","details":{},"code":403} Nov 22 10:39:54 crc kubenswrapper[4926]: I1122 10:39:54.873527 4926 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" probeResult="failure" output="HTTP probe 
failed with statuscode: 403" Nov 22 10:39:55 crc kubenswrapper[4926]: I1122 10:39:55.686251 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/0.log" Nov 22 10:39:55 crc kubenswrapper[4926]: I1122 10:39:55.688389 4926 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="a72c3a21a999d8cd54626c67ce5ee39d420aea38febb618c2cd1dd4aa3a63624" exitCode=255 Nov 22 10:39:55 crc kubenswrapper[4926]: I1122 10:39:55.688424 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerDied","Data":"a72c3a21a999d8cd54626c67ce5ee39d420aea38febb618c2cd1dd4aa3a63624"} Nov 22 10:39:55 crc kubenswrapper[4926]: I1122 10:39:55.688538 4926 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 22 10:39:55 crc kubenswrapper[4926]: I1122 10:39:55.689592 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:55 crc kubenswrapper[4926]: I1122 10:39:55.689670 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:55 crc kubenswrapper[4926]: I1122 10:39:55.689695 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:55 crc kubenswrapper[4926]: I1122 10:39:55.690827 4926 scope.go:117] "RemoveContainer" containerID="a72c3a21a999d8cd54626c67ce5ee39d420aea38febb618c2cd1dd4aa3a63624" Nov 22 10:39:56 crc kubenswrapper[4926]: I1122 10:39:56.410297 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 22 10:39:56 crc kubenswrapper[4926]: I1122 10:39:56.694628 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/0.log" Nov 22 10:39:56 crc kubenswrapper[4926]: I1122 10:39:56.697548 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"e4845e8b99f2def354e4a055d407495a578db4994aef6fb1c70b91760bbf4e2a"} Nov 22 10:39:56 crc kubenswrapper[4926]: I1122 10:39:56.697758 4926 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 22 10:39:56 crc kubenswrapper[4926]: I1122 10:39:56.699374 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:56 crc kubenswrapper[4926]: I1122 10:39:56.699438 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:56 crc kubenswrapper[4926]: I1122 10:39:56.699462 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:56 crc kubenswrapper[4926]: I1122 10:39:56.705137 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 22 10:39:57 crc kubenswrapper[4926]: I1122 10:39:57.699726 4926 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 22 10:39:57 crc kubenswrapper[4926]: I1122 10:39:57.699822 4926 
kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 22 10:39:57 crc kubenswrapper[4926]: I1122 10:39:57.700864 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:57 crc kubenswrapper[4926]: I1122 10:39:57.700961 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:57 crc kubenswrapper[4926]: I1122 10:39:57.700982 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:58 crc kubenswrapper[4926]: I1122 10:39:58.072505 4926 patch_prober.go:28] interesting pod/kube-controller-manager-crc container/cluster-policy-controller namespace/openshift-kube-controller-manager: Startup probe status=failure output="Get \"https://192.168.126.11:10357/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Nov 22 10:39:58 crc kubenswrapper[4926]: I1122 10:39:58.072575 4926 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="f614b9022728cf315e60c057852e563e" containerName="cluster-policy-controller" probeResult="failure" output="Get \"https://192.168.126.11:10357/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Nov 22 10:39:58 crc kubenswrapper[4926]: I1122 10:39:58.701420 4926 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 22 10:39:58 crc kubenswrapper[4926]: I1122 10:39:58.702471 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:58 crc kubenswrapper[4926]: I1122 10:39:58.702539 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:58 crc kubenswrapper[4926]: I1122 10:39:58.702561 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:59 crc kubenswrapper[4926]: I1122 10:39:59.753413 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 22 10:39:59 crc kubenswrapper[4926]: I1122 10:39:59.753546 4926 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 22 10:39:59 crc kubenswrapper[4926]: I1122 10:39:59.754625 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:59 crc kubenswrapper[4926]: I1122 10:39:59.754685 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:59 crc kubenswrapper[4926]: I1122 10:39:59.754709 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:59 crc kubenswrapper[4926]: E1122 10:39:59.865208 4926 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": context deadline exceeded" interval="6.4s" Nov 22 10:39:59 crc kubenswrapper[4926]: I1122 10:39:59.869095 4926 reflector.go:368] Caches populated for *v1.Node from k8s.io/client-go/informers/factory.go:160 Nov 22 10:39:59 crc 
Nov 22 10:39:59 crc kubenswrapper[4926]: I1122 10:39:59.869144 4926 reconstruct.go:205] "DevicePaths of reconstructed volumes updated"
Nov 22 10:39:59 crc kubenswrapper[4926]: I1122 10:39:59.871804 4926 trace.go:236] Trace[1360180233]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (22-Nov-2025 10:39:49.513) (total time: 10358ms):
Nov 22 10:39:59 crc kubenswrapper[4926]: Trace[1360180233]: ---"Objects listed" error: 10358ms (10:39:59.871)
Nov 22 10:39:59 crc kubenswrapper[4926]: Trace[1360180233]: [10.358309234s] [10.358309234s] END
Nov 22 10:39:59 crc kubenswrapper[4926]: I1122 10:39:59.871852 4926 reflector.go:368] Caches populated for *v1.Service from k8s.io/client-go/informers/factory.go:160
Nov 22 10:39:59 crc kubenswrapper[4926]: I1122 10:39:59.873522 4926 trace.go:236] Trace[1826145556]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (22-Nov-2025 10:39:48.034) (total time: 11839ms):
Nov 22 10:39:59 crc kubenswrapper[4926]: Trace[1826145556]: ---"Objects listed" error: 11839ms (10:39:59.873)
Nov 22 10:39:59 crc kubenswrapper[4926]: Trace[1826145556]: [11.83932712s] [11.83932712s] END
Nov 22 10:39:59 crc kubenswrapper[4926]: I1122 10:39:59.873557 4926 reflector.go:368] Caches populated for *v1.CSIDriver from k8s.io/client-go/informers/factory.go:160
Nov 22 10:39:59 crc kubenswrapper[4926]: I1122 10:39:59.877195 4926 kubelet_node_status.go:115] "Node was previously registered" node="crc"
Nov 22 10:39:59 crc kubenswrapper[4926]: I1122 10:39:59.877458 4926 kubelet_node_status.go:79] "Successfully registered node" node="crc"
Nov 22 10:39:59 crc kubenswrapper[4926]: I1122 10:39:59.879174 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 10:39:59 crc kubenswrapper[4926]: I1122 10:39:59.879223 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 10:39:59 crc kubenswrapper[4926]: I1122 10:39:59.879237 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 10:39:59 crc kubenswrapper[4926]: I1122 10:39:59.879256 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 10:39:59 crc kubenswrapper[4926]: I1122 10:39:59.879269 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:59Z","lastTransitionTime":"2025-11-22T10:39:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
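The NodeNotReady transition above is driven by the container runtime reporting NetworkReady=false until a CNI configuration file exists. A small sketch of the check an operator might run on the node against the directory named in the message:

    package main

    import (
    	"fmt"
    	"os"
    )

    func main() {
    	// Directory taken from the kubelet's network-plugin error message.
    	const cniConfDir = "/etc/kubernetes/cni/net.d"
    	entries, err := os.ReadDir(cniConfDir)
    	if err != nil {
    		fmt.Println("cannot read CNI conf dir:", err)
    		return
    	}
    	if len(entries) == 0 {
    		fmt.Println("no CNI configuration yet; node stays NotReady")
    		return
    	}
    	for _, e := range entries {
    		fmt.Println("found CNI config:", e.Name())
    	}
    }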
Nov 22 10:39:59 crc kubenswrapper[4926]: E1122 10:39:59.900374 4926 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404548Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865348Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:39:59Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:59Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:39:59Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:59Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:39:59Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:59Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:39:59Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:59Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"aeb560ef-1eb5-4732-98e3-250b723cbd1b\\\",\\\"systemUUID\\\":\\\"16dcb71b-1e1f-4d77-bb74-17aa213c9052\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 10:39:59 crc kubenswrapper[4926]: I1122 10:39:59.904215 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:59 crc kubenswrapper[4926]: I1122 10:39:59.904275 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:59 crc kubenswrapper[4926]: I1122 10:39:59.904289 4926 
kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 10:39:59 crc kubenswrapper[4926]: I1122 10:39:59.904308 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 10:39:59 crc kubenswrapper[4926]: I1122 10:39:59.904320 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:59Z","lastTransitionTime":"2025-11-22T10:39:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 10:39:59 crc kubenswrapper[4926]: E1122 10:39:59.924079 4926 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status [...] for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Nov 22 10:39:59 crc kubenswrapper[4926]: I1122 10:39:59.927028 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 10:39:59 crc kubenswrapper[4926]: I1122 10:39:59.927066 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 10:39:59 crc kubenswrapper[4926]: I1122 10:39:59.927081 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 10:39:59 crc kubenswrapper[4926]: I1122 10:39:59.927098 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 10:39:59 crc kubenswrapper[4926]: I1122 10:39:59.927109 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:59Z","lastTransitionTime":"2025-11-22T10:39:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 10:39:59 crc kubenswrapper[4926]: E1122 10:39:59.936231 4926 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status [...] for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Nov 22 10:39:59 crc kubenswrapper[4926]: I1122 10:39:59.938835 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 10:39:59 crc kubenswrapper[4926]: I1122 10:39:59.938858 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 10:39:59 crc kubenswrapper[4926]: I1122 10:39:59.938866 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 10:39:59 crc kubenswrapper[4926]: I1122 10:39:59.938878 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 10:39:59 crc kubenswrapper[4926]: I1122 10:39:59.938900 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:59Z","lastTransitionTime":"2025-11-22T10:39:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 10:39:59 crc kubenswrapper[4926]: E1122 10:39:59.946926 4926 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status [...] for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Nov 22 10:39:59 crc kubenswrapper[4926]: I1122 10:39:59.949386 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 10:39:59 crc kubenswrapper[4926]: I1122 10:39:59.949437 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 10:39:59 crc kubenswrapper[4926]: I1122 10:39:59.949448 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 10:39:59 crc kubenswrapper[4926]: I1122 10:39:59.949463 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 10:39:59 crc kubenswrapper[4926]: I1122 10:39:59.949476 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:59Z","lastTransitionTime":"2025-11-22T10:39:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 10:39:59 crc kubenswrapper[4926]: E1122 10:39:59.966522 4926 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status [...] for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"aeb560ef-1eb5-4732-98e3-250b723cbd1b\\\",\\\"systemUUID\\\":\\\"16dcb71b-1e1f-4d77-bb74-17aa213c9052\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 10:39:59 crc kubenswrapper[4926]: E1122 10:39:59.966636 4926 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 22 10:39:59 crc kubenswrapper[4926]: I1122 10:39:59.967973 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:39:59 crc kubenswrapper[4926]: I1122 10:39:59.968010 4926 
kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:39:59 crc kubenswrapper[4926]: I1122 10:39:59.968022 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:39:59 crc kubenswrapper[4926]: I1122 10:39:59.968038 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:39:59 crc kubenswrapper[4926]: I1122 10:39:59.968069 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:39:59Z","lastTransitionTime":"2025-11-22T10:39:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.069874 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.069932 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.069940 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.069953 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.069962 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:00Z","lastTransitionTime":"2025-11-22T10:40:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.172417 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.172445 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.172453 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.172465 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.172474 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:00Z","lastTransitionTime":"2025-11-22T10:40:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.275059 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.275134 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.275149 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.275181 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.275196 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:00Z","lastTransitionTime":"2025-11-22T10:40:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.377827 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.377896 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.377908 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.377931 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.377943 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:00Z","lastTransitionTime":"2025-11-22T10:40:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.480235 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.480275 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.480286 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.480304 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.480330 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:00Z","lastTransitionTime":"2025-11-22T10:40:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.497750 4926 apiserver.go:52] "Watching apiserver" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.501579 4926 reflector.go:368] Caches populated for *v1.Pod from pkg/kubelet/config/apiserver.go:66 Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.502115 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-network-diagnostics/network-check-source-55646444c4-trplf","openshift-network-diagnostics/network-check-target-xd92c","openshift-network-node-identity/network-node-identity-vrzqb","openshift-network-operator/iptables-alerter-4ln5h","openshift-network-operator/network-operator-58b4c7f79c-55gtf","openshift-dns/node-resolver-wqf9b","openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"] Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.502853 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/node-resolver-wqf9b" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.503164 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.503285 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 10:40:00 crc kubenswrapper[4926]: E1122 10:40:00.503363 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.503440 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 22 10:40:00 crc kubenswrapper[4926]: E1122 10:40:00.503481 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.503524 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 22 10:40:00 crc kubenswrapper[4926]: E1122 10:40:00.503551 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.503598 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.504005 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.504808 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"openshift-service-ca.crt" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.504873 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"openshift-service-ca.crt" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.504972 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"env-overrides" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.505620 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-operator"/"metrics-tls" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.505720 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"ovnkube-identity-cm" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.505865 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"kube-root-ca.crt" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.508759 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"kube-root-ca.crt" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.509138 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-daemon-xr9nd"] Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.509923 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-node-identity"/"network-node-identity-cert" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.510355 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/multus-additional-cni-plugins-sr572"] Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.510397 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"kube-root-ca.crt" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.510558 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"node-resolver-dockercfg-kz9s7" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.510731 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"iptables-alerter-script" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.510774 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"openshift-service-ca.crt" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.510819 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.513421 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/multus-c6w2q"] Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.513517 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-daemon-dockercfg-r5tcq" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.513553 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/multus-additional-cni-plugins-sr572" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.513715 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-c6w2q" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.515754 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"proxy-tls" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.516128 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-rbac-proxy" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.516895 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"cni-copy-resources" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.516929 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"kube-root-ca.crt" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.516956 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"default-cni-sysctl-allowlist" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.517540 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"default-dockercfg-2q5b6" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.517580 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"openshift-service-ca.crt" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.517655 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ancillary-tools-dockercfg-vnmsz" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.517690 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"multus-daemon-config" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.517725 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"openshift-service-ca.crt" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.518188 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-root-ca.crt" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.522560 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-wqf9b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"385427b5-fb45-42dd-8ff8-a4c7cdad6157\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: 
[dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4rrk8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:00Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-wqf9b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.522660 4926 desired_state_of_world_populator.go:154] "Finished populating initial desired state of world" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.534701 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.547536 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.559494 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.567740 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.574001 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-wqf9b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"385427b5-fb45-42dd-8ff8-a4c7cdad6157\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4rrk8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:00Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-wqf9b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.574100 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume 
\"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.574139 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jkwtn\" (UniqueName: \"kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn\") pod \"5b88f790-22fa-440e-b583-365168c0b23d\" (UID: \"5b88f790-22fa-440e-b583-365168c0b23d\") " Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.574162 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access\") pod \"496e6271-fb68-4057-954e-a0d97a4afa3f\" (UID: \"496e6271-fb68-4057-954e-a0d97a4afa3f\") " Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.574187 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lz9wn\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.574211 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dbsvg\" (UniqueName: \"kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg\") pod \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\" (UID: \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\") " Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.574232 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x7zkh\" (UniqueName: \"kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh\") pod \"6731426b-95fe-49ff-bb5f-40441049fde2\" (UID: \"6731426b-95fe-49ff-bb5f-40441049fde2\") " Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.574255 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pcxfs\" (UniqueName: \"kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.574464 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh" (OuterVolumeSpecName: "kube-api-access-x7zkh") pod "6731426b-95fe-49ff-bb5f-40441049fde2" (UID: "6731426b-95fe-49ff-bb5f-40441049fde2"). InnerVolumeSpecName "kube-api-access-x7zkh". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.574474 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "496e6271-fb68-4057-954e-a0d97a4afa3f" (UID: "496e6271-fb68-4057-954e-a0d97a4afa3f"). InnerVolumeSpecName "kube-api-access". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.574588 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn" (OuterVolumeSpecName: "kube-api-access-lz9wn") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "kube-api-access-lz9wn". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.574616 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs" (OuterVolumeSpecName: "kube-api-access-pcxfs") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "kube-api-access-pcxfs". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.574627 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn" (OuterVolumeSpecName: "kube-api-access-jkwtn") pod "5b88f790-22fa-440e-b583-365168c0b23d" (UID: "5b88f790-22fa-440e-b583-365168c0b23d"). InnerVolumeSpecName "kube-api-access-jkwtn". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.574701 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token\") pod \"5fe579f8-e8a6-4643-bce5-a661393c4dde\" (UID: \"5fe579f8-e8a6-4643-bce5-a661393c4dde\") " Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.574711 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token" (OuterVolumeSpecName: "node-bootstrap-token") pod "5fe579f8-e8a6-4643-bce5-a661393c4dde" (UID: "5fe579f8-e8a6-4643-bce5-a661393c4dde"). InnerVolumeSpecName "node-bootstrap-token". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.574731 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.574734 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg" (OuterVolumeSpecName: "kube-api-access-dbsvg") pod "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" (UID: "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9"). InnerVolumeSpecName "kube-api-access-dbsvg". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.574757 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qs4fp\" (UniqueName: \"kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp\") pod \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\" (UID: \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\") " Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.574857 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.574930 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6ccd8\" (UniqueName: \"kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.574958 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.574972 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs" (OuterVolumeSpecName: "v4-0-config-system-router-certs") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-router-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.574984 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.574995 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config" (OuterVolumeSpecName: "auth-proxy-config") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "auth-proxy-config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.575023 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert\") pod \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\" (UID: \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\") " Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.575046 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4d4hj\" (UniqueName: \"kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj\") pod \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\" (UID: \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\") " Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.575066 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert\") pod \"7539238d-5fe0-46ed-884e-1c3b566537ec\" (UID: \"7539238d-5fe0-46ed-884e-1c3b566537ec\") " Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.575084 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cfbct\" (UniqueName: \"kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct\") pod \"57a731c4-ef35-47a8-b875-bfb08a7f8011\" (UID: \"57a731c4-ef35-47a8-b875-bfb08a7f8011\") " Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.575103 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config\") pod \"e7e6199b-1264-4501-8953-767f51328d08\" (UID: \"e7e6199b-1264-4501-8953-767f51328d08\") " Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.575120 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.575138 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x2m85\" (UniqueName: \"kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85\") pod \"cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d\" (UID: \"cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d\") " Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.575155 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.575149 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template" (OuterVolumeSpecName: "v4-0-config-system-ocp-branding-template") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-ocp-branding-template". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.575176 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.575180 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8" (OuterVolumeSpecName: "kube-api-access-6ccd8") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "kube-api-access-6ccd8". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.575193 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert\") pod \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\" (UID: \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\") " Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.575252 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls\") pod \"0b574797-001e-440a-8f4e-c0be86edad0f\" (UID: \"0b574797-001e-440a-8f4e-c0be86edad0f\") " Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.575281 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.575351 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" (UID: "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.575351 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config" (OuterVolumeSpecName: "encryption-config") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "encryption-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.575363 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls" (OuterVolumeSpecName: "image-registry-operator-tls") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "image-registry-operator-tls". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.575491 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85" (OuterVolumeSpecName: "kube-api-access-x2m85") pod "cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d" (UID: "cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d"). InnerVolumeSpecName "kube-api-access-x2m85". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.575507 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" (UID: "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.575266 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert\") pod \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\" (UID: \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\") " Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.575539 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert" (OuterVolumeSpecName: "apiservice-cert") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "apiservice-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.575560 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj" (OuterVolumeSpecName: "kube-api-access-4d4hj") pod "3ab1a177-2de0-46d9-b765-d0d0649bb42e" (UID: "3ab1a177-2de0-46d9-b765-d0d0649bb42e"). InnerVolumeSpecName "kube-api-access-4d4hj". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.575547 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.575619 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gf66m\" (UniqueName: \"kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m\") pod \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\" (UID: \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\") " Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.575637 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ngvvp\" (UniqueName: \"kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.575656 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy\") pod \"4bb40260-dbaa-4fb0-84df-5e680505d512\" (UID: \"4bb40260-dbaa-4fb0-84df-5e680505d512\") " Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.575636 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert" (OuterVolumeSpecName: "profile-collector-cert") pod "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" (UID: "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9"). InnerVolumeSpecName "profile-collector-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.575695 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls" (OuterVolumeSpecName: "proxy-tls") pod "0b574797-001e-440a-8f4e-c0be86edad0f" (UID: "0b574797-001e-440a-8f4e-c0be86edad0f"). InnerVolumeSpecName "proxy-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.575695 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "7539238d-5fe0-46ed-884e-1c3b566537ec" (UID: "7539238d-5fe0-46ed-884e-1c3b566537ec"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.575792 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp" (OuterVolumeSpecName: "kube-api-access-qs4fp") pod "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" (UID: "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c"). InnerVolumeSpecName "kube-api-access-qs4fp". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.575797 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m" (OuterVolumeSpecName: "kube-api-access-gf66m") pod "a0128f3a-b052-44ed-a84e-c4c8aaf17c13" (UID: "a0128f3a-b052-44ed-a84e-c4c8aaf17c13"). InnerVolumeSpecName "kube-api-access-gf66m". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.575793 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca" (OuterVolumeSpecName: "v4-0-config-system-service-ca") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.575794 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct" (OuterVolumeSpecName: "kube-api-access-cfbct") pod "57a731c4-ef35-47a8-b875-bfb08a7f8011" (UID: "57a731c4-ef35-47a8-b875-bfb08a7f8011"). InnerVolumeSpecName "kube-api-access-cfbct". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.575922 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp" (OuterVolumeSpecName: "kube-api-access-ngvvp") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "kube-api-access-ngvvp". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.575967 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.575985 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.575999 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.576013 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.576039 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config" (OuterVolumeSpecName: "config") pod "e7e6199b-1264-4501-8953-767f51328d08" (UID: "e7e6199b-1264-4501-8953-767f51328d08"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.576234 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy" (OuterVolumeSpecName: "cni-binary-copy") pod "4bb40260-dbaa-4fb0-84df-5e680505d512" (UID: "4bb40260-dbaa-4fb0-84df-5e680505d512"). InnerVolumeSpecName "cni-binary-copy". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.576281 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit" (OuterVolumeSpecName: "audit") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "audit". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.576428 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images" (OuterVolumeSpecName: "images") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "images". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.576739 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides" (OuterVolumeSpecName: "env-overrides") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "env-overrides". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.576764 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.576258 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.576834 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config" (OuterVolumeSpecName: "config") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.576839 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mnrrd\" (UniqueName: \"kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd\") pod \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\" (UID: \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\") " Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.576937 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6g6sz\" (UniqueName: \"kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.576973 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config\") pod \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\" (UID: \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\") " Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.576981 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "trusted-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.577002 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.577051 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.577074 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.577095 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.577119 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-279lb\" (UniqueName: \"kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb\") pod \"7bb08738-c794-4ee8-9972-3a62ca171029\" (UID: \"7bb08738-c794-4ee8-9972-3a62ca171029\") " Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.577142 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.577204 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate" (OuterVolumeSpecName: "default-certificate") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "default-certificate". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.577217 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz" (OuterVolumeSpecName: "kube-api-access-6g6sz") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "kube-api-access-6g6sz". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.577298 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.577434 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth" (OuterVolumeSpecName: "stats-auth") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "stats-auth". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.577449 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb" (OuterVolumeSpecName: "kube-api-access-279lb") pod "7bb08738-c794-4ee8-9972-3a62ca171029" (UID: "7bb08738-c794-4ee8-9972-3a62ca171029"). InnerVolumeSpecName "kube-api-access-279lb". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.577440 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection" (OuterVolumeSpecName: "v4-0-config-user-template-provider-selection") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-template-provider-selection". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.577330 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vt5rc\" (UniqueName: \"kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc\") pod \"44663579-783b-4372-86d6-acf235a62d72\" (UID: \"44663579-783b-4372-86d6-acf235a62d72\") " Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.577501 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.577555 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.577713 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc" (OuterVolumeSpecName: "kube-api-access-vt5rc") pod "44663579-783b-4372-86d6-acf235a62d72" (UID: "44663579-783b-4372-86d6-acf235a62d72"). InnerVolumeSpecName "kube-api-access-vt5rc". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.577761 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls" (OuterVolumeSpecName: "metrics-tls") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "metrics-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.577579 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xcphl\" (UniqueName: \"kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.577825 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.577855 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl" (OuterVolumeSpecName: "kube-api-access-xcphl") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "kube-api-access-xcphl". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.577865 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca" (OuterVolumeSpecName: "etcd-service-ca") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "etcd-service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.577941 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle" (OuterVolumeSpecName: "service-ca-bundle") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "service-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.577983 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls" (OuterVolumeSpecName: "registry-tls") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "registry-tls". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.578094 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config" (OuterVolumeSpecName: "config") pod "1386a44e-36a2-460c-96d0-0359d2b6f0f5" (UID: "1386a44e-36a2-460c-96d0-0359d2b6f0f5"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.578127 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd" (OuterVolumeSpecName: "kube-api-access-mnrrd") pod "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" (UID: "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d"). InnerVolumeSpecName "kube-api-access-mnrrd". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.578339 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.577853 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.578374 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config" (OuterVolumeSpecName: "config") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.578418 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.578445 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mg5zb\" (UniqueName: \"kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.579107 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume\") pod \"87cf06ed-a83f-41a7-828d-70653580a8cb\" (UID: \"87cf06ed-a83f-41a7-828d-70653580a8cb\") " Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.578647 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca" (OuterVolumeSpecName: "etcd-ca") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "etcd-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.578719 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert" (OuterVolumeSpecName: "webhook-cert") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "webhook-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.579048 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb" (OuterVolumeSpecName: "kube-api-access-mg5zb") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "kube-api-access-mg5zb". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.579656 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume" (OuterVolumeSpecName: "config-volume") pod "87cf06ed-a83f-41a7-828d-70653580a8cb" (UID: "87cf06ed-a83f-41a7-828d-70653580a8cb"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.579665 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca" (OuterVolumeSpecName: "service-ca") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.579694 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.579727 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config\") pod \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\" (UID: \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\") " Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.579748 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert\") pod \"496e6271-fb68-4057-954e-a0d97a4afa3f\" (UID: \"496e6271-fb68-4057-954e-a0d97a4afa3f\") " Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.579769 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.579789 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Nov 
22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.579834 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert\") pod \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\" (UID: \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\") " Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.580006 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "496e6271-fb68-4057-954e-a0d97a4afa3f" (UID: "496e6271-fb68-4057-954e-a0d97a4afa3f"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.580082 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.580339 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies" (OuterVolumeSpecName: "audit-policies") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "audit-policies". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.580418 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "1386a44e-36a2-460c-96d0-0359d2b6f0f5" (UID: "1386a44e-36a2-460c-96d0-0359d2b6f0f5"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.580695 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config" (OuterVolumeSpecName: "config") pod "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" (UID: "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.581530 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert\") pod \"20b0d48f-5fd6-431c-a545-e3c800c7b866\" (UID: \"20b0d48f-5fd6-431c-a545-e3c800c7b866\") " Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.581561 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert\") pod \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\" (UID: \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\") " Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.581580 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7c4vf\" (UniqueName: \"kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.581618 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle\") pod \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\" (UID: \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\") " Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.581634 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca\") pod \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\" (UID: \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\") " Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.581652 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert\") pod \"01ab3dd5-8196-46d0-ad33-122e2ca51def\" (UID: \"01ab3dd5-8196-46d0-ad33-122e2ca51def\") " Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.581666 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.581682 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config\") pod \"fda69060-fa79-4696-b1a6-7980f124bf7c\" (UID: \"fda69060-fa79-4696-b1a6-7980f124bf7c\") " Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.581703 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert\") pod \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\" (UID: \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\") " Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.581720 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config\") pod \"01ab3dd5-8196-46d0-ad33-122e2ca51def\" (UID: 
\"01ab3dd5-8196-46d0-ad33-122e2ca51def\") " Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.581736 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.581751 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.581768 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.581784 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-249nr\" (UniqueName: \"kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr\") pod \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\" (UID: \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\") " Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.581801 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls\") pod \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\" (UID: \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\") " Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.581818 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.581839 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.581860 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fqsjt\" (UniqueName: \"kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt\") pod \"efdd0498-1daa-4136-9a4a-3b948c2293fc\" (UID: \"efdd0498-1daa-4136-9a4a-3b948c2293fc\") " Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.581895 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist\") pod \"7bb08738-c794-4ee8-9972-3a62ca171029\" (UID: \"7bb08738-c794-4ee8-9972-3a62ca171029\") " Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.581914 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client\") pod 
\"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.581935 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.581963 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content\") pod \"57a731c4-ef35-47a8-b875-bfb08a7f8011\" (UID: \"57a731c4-ef35-47a8-b875-bfb08a7f8011\") " Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.581985 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.582005 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates\") pod \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\" (UID: \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\") " Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.582027 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pjr6v\" (UniqueName: \"kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v\") pod \"49ef4625-1d3a-4a9f-b595-c2433d32326d\" (UID: \"49ef4625-1d3a-4a9f-b595-c2433d32326d\") " Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.582056 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert\") pod \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\" (UID: \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\") " Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.582075 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.582107 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.582122 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.582148 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: 
\"kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.582164 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8tdtz\" (UniqueName: \"kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.582180 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca\") pod \"0b78653f-4ff9-4508-8672-245ed9b561e3\" (UID: \"0b78653f-4ff9-4508-8672-245ed9b561e3\") " Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.582195 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert\") pod \"0b78653f-4ff9-4508-8672-245ed9b561e3\" (UID: \"0b78653f-4ff9-4508-8672-245ed9b561e3\") " Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.582210 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tk88c\" (UniqueName: \"kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c\") pod \"7539238d-5fe0-46ed-884e-1c3b566537ec\" (UID: \"7539238d-5fe0-46ed-884e-1c3b566537ec\") " Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.582290 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.582307 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities\") pod \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\" (UID: \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\") " Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.582326 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs\") pod \"5b88f790-22fa-440e-b583-365168c0b23d\" (UID: \"5b88f790-22fa-440e-b583-365168c0b23d\") " Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.582347 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.582367 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config\") pod \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\" (UID: \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\") " Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.582387 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for 
volume \"kube-api-access-zgdk5\" (UniqueName: \"kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.582403 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca\") pod \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\" (UID: \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\") " Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.582423 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities\") pod \"57a731c4-ef35-47a8-b875-bfb08a7f8011\" (UID: \"57a731c4-ef35-47a8-b875-bfb08a7f8011\") " Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.582462 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.582480 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert\") pod \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\" (UID: \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\") " Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.582581 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls\") pod \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\" (UID: \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\") " Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.582608 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.582628 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.582648 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.582682 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.582701 4926 reconciler_common.go:159] 
"operationExecutor.UnmountVolume started for volume \"kube-api-access-d4lsv\" (UniqueName: \"kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv\") pod \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\" (UID: \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\") " Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.582716 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sb6h7\" (UniqueName: \"kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.582750 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.582765 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.582779 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.582794 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jhbk2\" (UniqueName: \"kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2\") pod \"bd23aa5c-e532-4e53-bccf-e79f130c5ae8\" (UID: \"bd23aa5c-e532-4e53-bccf-e79f130c5ae8\") " Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.582810 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs\") pod \"efdd0498-1daa-4136-9a4a-3b948c2293fc\" (UID: \"efdd0498-1daa-4136-9a4a-3b948c2293fc\") " Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.582826 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.582842 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.582857 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wxkg8\" (UniqueName: \"kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8\") pod \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\" (UID: \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\") " Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.582872 4926 reconciler_common.go:159] 
"operationExecutor.UnmountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls\") pod \"fda69060-fa79-4696-b1a6-7980f124bf7c\" (UID: \"fda69060-fa79-4696-b1a6-7980f124bf7c\") " Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.582963 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.582989 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lzf88\" (UniqueName: \"kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88\") pod \"0b574797-001e-440a-8f4e-c0be86edad0f\" (UID: \"0b574797-001e-440a-8f4e-c0be86edad0f\") " Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.583012 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.583031 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key\") pod \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\" (UID: \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\") " Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.583046 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy\") pod \"7bb08738-c794-4ee8-9972-3a62ca171029\" (UID: \"7bb08738-c794-4ee8-9972-3a62ca171029\") " Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.583062 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pj782\" (UniqueName: \"kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782\") pod \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\" (UID: \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\") " Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.583078 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2w9zh\" (UniqueName: \"kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh\") pod \"4bb40260-dbaa-4fb0-84df-5e680505d512\" (UID: \"4bb40260-dbaa-4fb0-84df-5e680505d512\") " Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.583093 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities\") pod \"1d611f23-29be-4491-8495-bee1670e935f\" (UID: \"1d611f23-29be-4491-8495-bee1670e935f\") " Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.583107 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.583123 4926 reconciler_common.go:159] 
"operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.583137 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.583152 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.583171 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qg5z5\" (UniqueName: \"kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.583187 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics\") pod \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\" (UID: \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\") " Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.583202 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w7l8j\" (UniqueName: \"kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j\") pod \"01ab3dd5-8196-46d0-ad33-122e2ca51def\" (UID: \"01ab3dd5-8196-46d0-ad33-122e2ca51def\") " Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.583221 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bf2bz\" (UniqueName: \"kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz\") pod \"1d611f23-29be-4491-8495-bee1670e935f\" (UID: \"1d611f23-29be-4491-8495-bee1670e935f\") " Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.583242 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config\") pod \"496e6271-fb68-4057-954e-a0d97a4afa3f\" (UID: \"496e6271-fb68-4057-954e-a0d97a4afa3f\") " Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.583252 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca" (OuterVolumeSpecName: "etcd-serving-ca") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "etcd-serving-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.583263 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.583350 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls\") pod \"87cf06ed-a83f-41a7-828d-70653580a8cb\" (UID: \"87cf06ed-a83f-41a7-828d-70653580a8cb\") " Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.583373 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.583394 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nzwt7\" (UniqueName: \"kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7\") pod \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\" (UID: \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\") " Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.583400 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr" (OuterVolumeSpecName: "kube-api-access-249nr") pod "b6312bbd-5731-4ea0-a20f-81d5a57df44a" (UID: "b6312bbd-5731-4ea0-a20f-81d5a57df44a"). InnerVolumeSpecName "kube-api-access-249nr". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.583416 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.583441 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.583468 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.583488 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.583659 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.583722 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert" (OuterVolumeSpecName: "cert") pod "20b0d48f-5fd6-431c-a545-e3c800c7b866" (UID: "20b0d48f-5fd6-431c-a545-e3c800c7b866"). InnerVolumeSpecName "cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.583744 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls" (OuterVolumeSpecName: "samples-operator-tls") pod "a0128f3a-b052-44ed-a84e-c4c8aaf17c13" (UID: "a0128f3a-b052-44ed-a84e-c4c8aaf17c13"). InnerVolumeSpecName "samples-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:40:00 crc kubenswrapper[4926]: E1122 10:40:00.583781 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 10:40:01.083763385 +0000 UTC m=+21.385368672 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.584125 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.584318 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca" (OuterVolumeSpecName: "marketplace-trusted-ca") pod "b6cd30de-2eeb-49a2-ab40-9167f4560ff5" (UID: "b6cd30de-2eeb-49a2-ab40-9167f4560ff5"). InnerVolumeSpecName "marketplace-trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.584414 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "01ab3dd5-8196-46d0-ad33-122e2ca51def" (UID: "01ab3dd5-8196-46d0-ad33-122e2ca51def"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.584342 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities" (OuterVolumeSpecName: "utilities") pod "57a731c4-ef35-47a8-b875-bfb08a7f8011" (UID: "57a731c4-ef35-47a8-b875-bfb08a7f8011"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.584621 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf" (OuterVolumeSpecName: "kube-api-access-7c4vf") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "kube-api-access-7c4vf". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.584869 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config" (OuterVolumeSpecName: "encryption-config") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "encryption-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.585184 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca" (OuterVolumeSpecName: "serviceca") pod "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" (UID: "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59"). InnerVolumeSpecName "serviceca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.585234 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities\") pod \"5225d0e4-402f-4861-b410-819f433b1803\" (UID: \"5225d0e4-402f-4861-b410-819f433b1803\") " Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.585258 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config\") pod \"0b574797-001e-440a-8f4e-c0be86edad0f\" (UID: \"0b574797-001e-440a-8f4e-c0be86edad0f\") " Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.585277 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.585295 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.585310 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w9rds\" (UniqueName: \"kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds\") pod \"20b0d48f-5fd6-431c-a545-e3c800c7b866\" (UID: \"20b0d48f-5fd6-431c-a545-e3c800c7b866\") " Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.585327 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rnphk\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.585346 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.585369 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zkvpv\" (UniqueName: \"kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.585390 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w4xd4\" (UniqueName: \"kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4\") pod \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\" (UID: \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\") " Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.585414 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca-bundle\" (UniqueName: 
\"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.585440 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.585461 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert\") pod \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\" (UID: \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\") " Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.585478 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content\") pod \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\" (UID: \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\") " Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.585495 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config\") pod \"7539238d-5fe0-46ed-884e-1c3b566537ec\" (UID: \"7539238d-5fe0-46ed-884e-1c3b566537ec\") " Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.585510 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.585525 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.585541 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-s4n52\" (UniqueName: \"kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.585558 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.585556 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config" (OuterVolumeSpecName: "ovnkube-config") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "ovnkube-config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.585574 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-v47cf\" (UniqueName: \"kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.585589 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.585605 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.585620 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.585637 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.585652 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.585669 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xcgwh\" (UniqueName: \"kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh\") pod \"fda69060-fa79-4696-b1a6-7980f124bf7c\" (UID: \"fda69060-fa79-4696-b1a6-7980f124bf7c\") " Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.585684 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert\") pod \"e7e6199b-1264-4501-8953-767f51328d08\" (UID: \"e7e6199b-1264-4501-8953-767f51328d08\") " Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.585701 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access\") pod \"0b78653f-4ff9-4508-8672-245ed9b561e3\" (UID: \"0b78653f-4ff9-4508-8672-245ed9b561e3\") " Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.585716 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access\") pod 
\"1386a44e-36a2-460c-96d0-0359d2b6f0f5\" (UID: \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\") " Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.585732 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.585746 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle" (OuterVolumeSpecName: "signing-cabundle") pod "25e176fe-21b4-4974-b1ed-c8b94f112a7f" (UID: "25e176fe-21b4-4974-b1ed-c8b94f112a7f"). InnerVolumeSpecName "signing-cabundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.585757 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fcqwp\" (UniqueName: \"kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp\") pod \"5fe579f8-e8a6-4643-bce5-a661393c4dde\" (UID: \"5fe579f8-e8a6-4643-bce5-a661393c4dde\") " Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.585775 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.585790 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.585807 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs\") pod \"5fe579f8-e8a6-4643-bce5-a661393c4dde\" (UID: \"5fe579f8-e8a6-4643-bce5-a661393c4dde\") " Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.587129 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs" (OuterVolumeSpecName: "webhook-certs") pod "efdd0498-1daa-4136-9a4a-3b948c2293fc" (UID: "efdd0498-1daa-4136-9a4a-3b948c2293fc"). InnerVolumeSpecName "webhook-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.587137 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login" (OuterVolumeSpecName: "v4-0-config-user-template-login") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-template-login". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.587205 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). 
InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.587294 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates" (OuterVolumeSpecName: "available-featuregates") pod "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" (UID: "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d"). InnerVolumeSpecName "available-featuregates". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.587536 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls" (OuterVolumeSpecName: "metrics-tls") pod "87cf06ed-a83f-41a7-828d-70653580a8cb" (UID: "87cf06ed-a83f-41a7-828d-70653580a8cb"). InnerVolumeSpecName "metrics-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.587661 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs" (OuterVolumeSpecName: "certs") pod "5fe579f8-e8a6-4643-bce5-a661393c4dde" (UID: "5fe579f8-e8a6-4643-bce5-a661393c4dde"). InnerVolumeSpecName "certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.587711 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config" (OuterVolumeSpecName: "auth-proxy-config") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.582822 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.587766 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.587780 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.587799 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.587821 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:00Z","lastTransitionTime":"2025-11-22T10:40:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.588209 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca" (OuterVolumeSpecName: "client-ca") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "client-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.588343 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs" (OuterVolumeSpecName: "metrics-certs") pod "5b88f790-22fa-440e-b583-365168c0b23d" (UID: "5b88f790-22fa-440e-b583-365168c0b23d"). InnerVolumeSpecName "metrics-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.588569 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v" (OuterVolumeSpecName: "kube-api-access-pjr6v") pod "49ef4625-1d3a-4a9f-b595-c2433d32326d" (UID: "49ef4625-1d3a-4a9f-b595-c2433d32326d"). InnerVolumeSpecName "kube-api-access-pjr6v". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.588769 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data" (OuterVolumeSpecName: "v4-0-config-user-idp-0-file-data") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-idp-0-file-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.588809 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config" (OuterVolumeSpecName: "console-oauth-config") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "console-oauth-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.588935 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="44663579-783b-4372-86d6-acf235a62d72" path="/var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/volumes" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.589080 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5" (OuterVolumeSpecName: "kube-api-access-qg5z5") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "kube-api-access-qg5z5". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.589184 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.589297 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics" (OuterVolumeSpecName: "marketplace-operator-metrics") pod "b6cd30de-2eeb-49a2-ab40-9167f4560ff5" (UID: "b6cd30de-2eeb-49a2-ab40-9167f4560ff5"). InnerVolumeSpecName "marketplace-operator-metrics". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.590835 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j" (OuterVolumeSpecName: "kube-api-access-w7l8j") pod "01ab3dd5-8196-46d0-ad33-122e2ca51def" (UID: "01ab3dd5-8196-46d0-ad33-122e2ca51def"). InnerVolumeSpecName "kube-api-access-w7l8j". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.591138 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz" (OuterVolumeSpecName: "kube-api-access-bf2bz") pod "1d611f23-29be-4491-8495-bee1670e935f" (UID: "1d611f23-29be-4491-8495-bee1670e935f"). InnerVolumeSpecName "kube-api-access-bf2bz". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.591151 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" (UID: "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.591330 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c" (OuterVolumeSpecName: "kube-api-access-tk88c") pod "7539238d-5fe0-46ed-884e-1c3b566537ec" (UID: "7539238d-5fe0-46ed-884e-1c3b566537ec"). InnerVolumeSpecName "kube-api-access-tk88c". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.591709 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config" (OuterVolumeSpecName: "mcd-auth-proxy-config") pod "fda69060-fa79-4696-b1a6-7980f124bf7c" (UID: "fda69060-fa79-4696-b1a6-7980f124bf7c"). InnerVolumeSpecName "mcd-auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.591781 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities" (OuterVolumeSpecName: "utilities") pod "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" (UID: "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.592226 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config" (OuterVolumeSpecName: "config") pod "496e6271-fb68-4057-954e-a0d97a4afa3f" (UID: "496e6271-fb68-4057-954e-a0d97a4afa3f"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.592369 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls" (OuterVolumeSpecName: "metrics-tls") pod "96b93a3a-6083-4aea-8eab-fe1aa8245ad9" (UID: "96b93a3a-6083-4aea-8eab-fe1aa8245ad9"). InnerVolumeSpecName "metrics-tls". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.592514 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2" (OuterVolumeSpecName: "kube-api-access-jhbk2") pod "bd23aa5c-e532-4e53-bccf-e79f130c5ae8" (UID: "bd23aa5c-e532-4e53-bccf-e79f130c5ae8"). InnerVolumeSpecName "kube-api-access-jhbk2". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.592821 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.594369 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7" (OuterVolumeSpecName: "kube-api-access-nzwt7") pod "96b93a3a-6083-4aea-8eab-fe1aa8245ad9" (UID: "96b93a3a-6083-4aea-8eab-fe1aa8245ad9"). InnerVolumeSpecName "kube-api-access-nzwt7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.594655 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle" (OuterVolumeSpecName: "v4-0-config-system-trusted-ca-bundle") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.594673 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "0b78653f-4ff9-4508-8672-245ed9b561e3" (UID: "0b78653f-4ff9-4508-8672-245ed9b561e3"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.595114 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session" (OuterVolumeSpecName: "v4-0-config-system-session") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-session". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.595347 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client" (OuterVolumeSpecName: "etcd-client") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "etcd-client". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.595453 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk" (OuterVolumeSpecName: "kube-api-access-rnphk") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "kube-api-access-rnphk". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.595703 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8" (OuterVolumeSpecName: "kube-api-access-wxkg8") pod "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" (UID: "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59"). InnerVolumeSpecName "kube-api-access-wxkg8". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.595773 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.595936 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config" (OuterVolumeSpecName: "config") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.596239 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls" (OuterVolumeSpecName: "proxy-tls") pod "fda69060-fa79-4696-b1a6-7980f124bf7c" (UID: "fda69060-fa79-4696-b1a6-7980f124bf7c"). InnerVolumeSpecName "proxy-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.596279 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds" (OuterVolumeSpecName: "kube-api-access-w9rds") pod "20b0d48f-5fd6-431c-a545-e3c800c7b866" (UID: "20b0d48f-5fd6-431c-a545-e3c800c7b866"). InnerVolumeSpecName "kube-api-access-w9rds". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.596354 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert" (OuterVolumeSpecName: "srv-cert") pod "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" (UID: "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9"). InnerVolumeSpecName "srv-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.596447 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert" (OuterVolumeSpecName: "package-server-manager-serving-cert") pod "3ab1a177-2de0-46d9-b765-d0d0649bb42e" (UID: "3ab1a177-2de0-46d9-b765-d0d0649bb42e"). 
InnerVolumeSpecName "package-server-manager-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.596565 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.596586 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config" (OuterVolumeSpecName: "config") pod "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" (UID: "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.596967 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert" (OuterVolumeSpecName: "srv-cert") pod "b6312bbd-5731-4ea0-a20f-81d5a57df44a" (UID: "b6312bbd-5731-4ea0-a20f-81d5a57df44a"). InnerVolumeSpecName "srv-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.597119 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca" (OuterVolumeSpecName: "service-ca") pod "0b78653f-4ff9-4508-8672-245ed9b561e3" (UID: "0b78653f-4ff9-4508-8672-245ed9b561e3"). InnerVolumeSpecName "service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.597273 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert" (OuterVolumeSpecName: "ovn-control-plane-metrics-cert") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "ovn-control-plane-metrics-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.597285 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.597606 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "trusted-ca-bundle". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.597760 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig" (OuterVolumeSpecName: "v4-0-config-system-cliconfig") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-cliconfig". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.597825 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert" (OuterVolumeSpecName: "console-serving-cert") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "console-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.598367 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls" (OuterVolumeSpecName: "machine-approver-tls") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "machine-approver-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.598606 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config" (OuterVolumeSpecName: "config") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.598955 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5" (OuterVolumeSpecName: "kube-api-access-zgdk5") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "kube-api-access-zgdk5". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.599056 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls" (OuterVolumeSpecName: "proxy-tls") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "proxy-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.599290 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies" (OuterVolumeSpecName: "audit-policies") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "audit-policies". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.599428 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities" (OuterVolumeSpecName: "utilities") pod "5225d0e4-402f-4861-b410-819f433b1803" (UID: "5225d0e4-402f-4861-b410-819f433b1803"). 
InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.599687 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52" (OuterVolumeSpecName: "kube-api-access-s4n52") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "kube-api-access-s4n52". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.599739 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.599829 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="496e6271-fb68-4057-954e-a0d97a4afa3f" path="/var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.599940 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config" (OuterVolumeSpecName: "mcc-auth-proxy-config") pod "0b574797-001e-440a-8f4e-c0be86edad0f" (UID: "0b574797-001e-440a-8f4e-c0be86edad0f"). InnerVolumeSpecName "mcc-auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.601144 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config" (OuterVolumeSpecName: "config") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.601556 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca" (OuterVolumeSpecName: "etcd-serving-ca") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "etcd-serving-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.601656 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88" (OuterVolumeSpecName: "kube-api-access-lzf88") pod "0b574797-001e-440a-8f4e-c0be86edad0f" (UID: "0b574797-001e-440a-8f4e-c0be86edad0f"). InnerVolumeSpecName "kube-api-access-lzf88". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.601926 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config" (OuterVolumeSpecName: "config") pod "01ab3dd5-8196-46d0-ad33-122e2ca51def" (UID: "01ab3dd5-8196-46d0-ad33-122e2ca51def"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.601992 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib" (OuterVolumeSpecName: "ovnkube-script-lib") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "ovnkube-script-lib". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.602189 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key" (OuterVolumeSpecName: "signing-key") pod "25e176fe-21b4-4974-b1ed-c8b94f112a7f" (UID: "25e176fe-21b4-4974-b1ed-c8b94f112a7f"). InnerVolumeSpecName "signing-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.602368 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh" (OuterVolumeSpecName: "kube-api-access-xcgwh") pod "fda69060-fa79-4696-b1a6-7980f124bf7c" (UID: "fda69060-fa79-4696-b1a6-7980f124bf7c"). InnerVolumeSpecName "kube-api-access-xcgwh". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.602482 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4" (OuterVolumeSpecName: "kube-api-access-w4xd4") pod "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" (UID: "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b"). InnerVolumeSpecName "kube-api-access-w4xd4". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.602851 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca" (OuterVolumeSpecName: "client-ca") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.602936 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "e7e6199b-1264-4501-8953-767f51328d08" (UID: "e7e6199b-1264-4501-8953-767f51328d08"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.603161 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle" (OuterVolumeSpecName: "service-ca-bundle") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "service-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.603165 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "0b78653f-4ff9-4508-8672-245ed9b561e3" (UID: "0b78653f-4ff9-4508-8672-245ed9b561e3"). 
InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.603419 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "1386a44e-36a2-460c-96d0-0359d2b6f0f5" (UID: "1386a44e-36a2-460c-96d0-0359d2b6f0f5"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.603459 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls" (OuterVolumeSpecName: "machine-api-operator-tls") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "machine-api-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.603570 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt" (OuterVolumeSpecName: "kube-api-access-fqsjt") pod "efdd0498-1daa-4136-9a4a-3b948c2293fc" (UID: "efdd0498-1daa-4136-9a4a-3b948c2293fc"). InnerVolumeSpecName "kube-api-access-fqsjt". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.604032 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs" (OuterVolumeSpecName: "tmpfs") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "tmpfs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.604428 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides" (OuterVolumeSpecName: "env-overrides") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "env-overrides". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.604772 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf" (OuterVolumeSpecName: "kube-api-access-v47cf") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "kube-api-access-v47cf". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.604990 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv" (OuterVolumeSpecName: "kube-api-access-d4lsv") pod "25e176fe-21b4-4974-b1ed-c8b94f112a7f" (UID: "25e176fe-21b4-4974-b1ed-c8b94f112a7f"). InnerVolumeSpecName "kube-api-access-d4lsv". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.605703 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images" (OuterVolumeSpecName: "images") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "images". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.605730 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="49ef4625-1d3a-4a9f-b595-c2433d32326d" path="/var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/volumes" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.605789 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz" (OuterVolumeSpecName: "kube-api-access-8tdtz") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "kube-api-access-8tdtz". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.606057 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config" (OuterVolumeSpecName: "config") pod "7539238d-5fe0-46ed-884e-1c3b566537ec" (UID: "7539238d-5fe0-46ed-884e-1c3b566537ec"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.606061 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.606275 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert" (OuterVolumeSpecName: "profile-collector-cert") pod "b6312bbd-5731-4ea0-a20f-81d5a57df44a" (UID: "b6312bbd-5731-4ea0-a20f-81d5a57df44a"). InnerVolumeSpecName "profile-collector-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.606306 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client" (OuterVolumeSpecName: "etcd-client") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "etcd-client". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.606423 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7" (OuterVolumeSpecName: "kube-api-access-sb6h7") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "kube-api-access-sb6h7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.606641 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities" (OuterVolumeSpecName: "utilities") pod "1d611f23-29be-4491-8495-bee1670e935f" (UID: "1d611f23-29be-4491-8495-bee1670e935f"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.607556 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs" (OuterVolumeSpecName: "metrics-certs") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "metrics-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.607624 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client" (OuterVolumeSpecName: "etcd-client") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "etcd-client". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.607835 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.608104 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config" (OuterVolumeSpecName: "ovnkube-config") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "ovnkube-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.609151 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config\") pod \"4bb40260-dbaa-4fb0-84df-5e680505d512\" (UID: \"4bb40260-dbaa-4fb0-84df-5e680505d512\") " Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.609194 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls\") pod \"6731426b-95fe-49ff-bb5f-40441049fde2\" (UID: \"6731426b-95fe-49ff-bb5f-40441049fde2\") " Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.609215 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.609153 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config" (OuterVolumeSpecName: "config") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.609943 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist" (OuterVolumeSpecName: "cni-sysctl-allowlist") pod "7bb08738-c794-4ee8-9972-3a62ca171029" (UID: "7bb08738-c794-4ee8-9972-3a62ca171029"). InnerVolumeSpecName "cni-sysctl-allowlist". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.608548 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy" (OuterVolumeSpecName: "cni-binary-copy") pod "7bb08738-c794-4ee8-9972-3a62ca171029" (UID: "7bb08738-c794-4ee8-9972-3a62ca171029"). InnerVolumeSpecName "cni-binary-copy". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.609975 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2d4wz\" (UniqueName: \"kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.609982 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.609297 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config" (OuterVolumeSpecName: "config") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.609542 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz" (OuterVolumeSpecName: "kube-api-access-2d4wz") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "kube-api-access-2d4wz". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.609875 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca" (OuterVolumeSpecName: "image-import-ca") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "image-import-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.610016 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.610034 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv" (OuterVolumeSpecName: "kube-api-access-zkvpv") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "kube-api-access-zkvpv". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.610046 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x4zgh\" (UniqueName: \"kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh\") pod \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\" (UID: \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\") " Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.610463 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content\") pod \"5225d0e4-402f-4861-b410-819f433b1803\" (UID: \"5225d0e4-402f-4861-b410-819f433b1803\") " Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.610486 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content\") pod \"1d611f23-29be-4491-8495-bee1670e935f\" (UID: \"1d611f23-29be-4491-8495-bee1670e935f\") " Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.610504 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d6qdx\" (UniqueName: \"kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx\") pod \"87cf06ed-a83f-41a7-828d-70653580a8cb\" (UID: \"87cf06ed-a83f-41a7-828d-70653580a8cb\") " Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.610521 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9xfj7\" (UniqueName: \"kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7\") pod \"5225d0e4-402f-4861-b410-819f433b1803\" (UID: \"5225d0e4-402f-4861-b410-819f433b1803\") " Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.610540 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access\") pod \"e7e6199b-1264-4501-8953-767f51328d08\" (UID: \"e7e6199b-1264-4501-8953-767f51328d08\") " Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.610556 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.610606 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert" (OuterVolumeSpecName: "v4-0-config-system-serving-cert") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.610655 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh" (OuterVolumeSpecName: "kube-api-access-x4zgh") pod "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" (UID: "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d"). InnerVolumeSpecName "kube-api-access-x4zgh". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.610646 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config" (OuterVolumeSpecName: "multus-daemon-config") pod "4bb40260-dbaa-4fb0-84df-5e680505d512" (UID: "4bb40260-dbaa-4fb0-84df-5e680505d512"). InnerVolumeSpecName "multus-daemon-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.610728 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.609913 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets" (OuterVolumeSpecName: "installation-pull-secrets") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "installation-pull-secrets". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.610780 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.610987 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert" (OuterVolumeSpecName: "ovn-node-metrics-cert") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "ovn-node-metrics-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.611044 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-htfz6\" (UniqueName: \"kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.611089 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.611117 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kfwg7\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.611356 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error" (OuterVolumeSpecName: "v4-0-config-user-template-error") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-template-error". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.611569 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782" (OuterVolumeSpecName: "kube-api-access-pj782") pod "b6cd30de-2eeb-49a2-ab40-9167f4560ff5" (UID: "b6cd30de-2eeb-49a2-ab40-9167f4560ff5"). InnerVolumeSpecName "kube-api-access-pj782". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.611597 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5b88f790-22fa-440e-b583-365168c0b23d" path="/var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/volumes" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.611736 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls" (OuterVolumeSpecName: "control-plane-machine-set-operator-tls") pod "6731426b-95fe-49ff-bb5f-40441049fde2" (UID: "6731426b-95fe-49ff-bb5f-40441049fde2"). InnerVolumeSpecName "control-plane-machine-set-operator-tls". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.611830 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/fa95257d-7464-4038-b2f3-aa795e4ac425-cni-sysctl-allowlist\") pod \"multus-additional-cni-plugins-sr572\" (UID: \"fa95257d-7464-4038-b2f3-aa795e4ac425\") " pod="openshift-multus/multus-additional-cni-plugins-sr572" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.611900 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/d4977b14-85c3-4141-9b15-1768f09e8d27-proxy-tls\") pod \"machine-config-daemon-xr9nd\" (UID: \"d4977b14-85c3-4141-9b15-1768f09e8d27\") " pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.611960 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"iptables-alerter-script\" (UniqueName: \"kubernetes.io/configmap/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-iptables-alerter-script\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.612015 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/36de2843-6491-4c54-b624-c4a3d328c164-host-run-netns\") pod \"multus-c6w2q\" (UID: \"36de2843-6491-4c54-b624-c4a3d328c164\") " pod="openshift-multus/multus-c6w2q" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.612052 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/ef543e1b-8068-4ea3-b32a-61027b32e95d-webhook-cert\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.612131 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config" (OuterVolumeSpecName: "console-config") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "console-config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.612180 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2kz5\" (UniqueName: \"kubernetes.io/projected/ef543e1b-8068-4ea3-b32a-61027b32e95d-kube-api-access-s2kz5\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.612344 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"hosts-file\" (UniqueName: \"kubernetes.io/host-path/385427b5-fb45-42dd-8ff8-a4c7cdad6157-hosts-file\") pod \"node-resolver-wqf9b\" (UID: \"385427b5-fb45-42dd-8ff8-a4c7cdad6157\") " pod="openshift-dns/node-resolver-wqf9b" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.612373 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/37a5e44f-9a88-4405-be8a-b645485e7312-metrics-tls\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.612417 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rczfb\" (UniqueName: \"kubernetes.io/projected/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-kube-api-access-rczfb\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.612442 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/36de2843-6491-4c54-b624-c4a3d328c164-os-release\") pod \"multus-c6w2q\" (UID: \"36de2843-6491-4c54-b624-c4a3d328c164\") " pod="openshift-multus/multus-c6w2q" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.612513 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config" (OuterVolumeSpecName: "config") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.613220 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx" (OuterVolumeSpecName: "kube-api-access-d6qdx") pod "87cf06ed-a83f-41a7-828d-70653580a8cb" (UID: "87cf06ed-a83f-41a7-828d-70653580a8cb"). InnerVolumeSpecName "kube-api-access-d6qdx". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.612466 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-conf-dir\" (UniqueName: \"kubernetes.io/host-path/36de2843-6491-4c54-b624-c4a3d328c164-multus-conf-dir\") pod \"multus-c6w2q\" (UID: \"36de2843-6491-4c54-b624-c4a3d328c164\") " pod="openshift-multus/multus-c6w2q" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.613337 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-multus-certs\" (UniqueName: \"kubernetes.io/host-path/36de2843-6491-4c54-b624-c4a3d328c164-host-run-multus-certs\") pod \"multus-c6w2q\" (UID: \"36de2843-6491-4c54-b624-c4a3d328c164\") " pod="openshift-multus/multus-c6w2q" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.613354 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-kubernetes\" (UniqueName: \"kubernetes.io/host-path/36de2843-6491-4c54-b624-c4a3d328c164-etc-kubernetes\") pod \"multus-c6w2q\" (UID: \"36de2843-6491-4c54-b624-c4a3d328c164\") " pod="openshift-multus/multus-c6w2q" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.613380 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-bin\" (UniqueName: \"kubernetes.io/host-path/36de2843-6491-4c54-b624-c4a3d328c164-host-var-lib-cni-bin\") pod \"multus-c6w2q\" (UID: \"36de2843-6491-4c54-b624-c4a3d328c164\") " pod="openshift-multus/multus-c6w2q" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.613398 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/fa95257d-7464-4038-b2f3-aa795e4ac425-os-release\") pod \"multus-additional-cni-plugins-sr572\" (UID: \"fa95257d-7464-4038-b2f3-aa795e4ac425\") " pod="openshift-multus/multus-additional-cni-plugins-sr572" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.613418 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-identity-cm\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-ovnkube-identity-cm\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.613467 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-socket-dir-parent\" (UniqueName: \"kubernetes.io/host-path/36de2843-6491-4c54-b624-c4a3d328c164-multus-socket-dir-parent\") pod \"multus-c6w2q\" (UID: \"36de2843-6491-4c54-b624-c4a3d328c164\") " pod="openshift-multus/multus-c6w2q" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.613484 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5nwn7\" (UniqueName: \"kubernetes.io/projected/d4977b14-85c3-4141-9b15-1768f09e8d27-kube-api-access-5nwn7\") pod \"machine-config-daemon-xr9nd\" (UID: \"d4977b14-85c3-4141-9b15-1768f09e8d27\") " pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.613485 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7" (OuterVolumeSpecName: 
"kube-api-access-9xfj7") pod "5225d0e4-402f-4861-b410-819f433b1803" (UID: "5225d0e4-402f-4861-b410-819f433b1803"). InnerVolumeSpecName "kube-api-access-9xfj7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.613503 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rdwmf\" (UniqueName: \"kubernetes.io/projected/37a5e44f-9a88-4405-be8a-b645485e7312-kube-api-access-rdwmf\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.613521 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/d4977b14-85c3-4141-9b15-1768f09e8d27-mcd-auth-proxy-config\") pod \"machine-config-daemon-xr9nd\" (UID: \"d4977b14-85c3-4141-9b15-1768f09e8d27\") " pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.613540 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-env-overrides\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.613582 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.613600 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/36de2843-6491-4c54-b624-c4a3d328c164-system-cni-dir\") pod \"multus-c6w2q\" (UID: \"36de2843-6491-4c54-b624-c4a3d328c164\") " pod="openshift-multus/multus-c6w2q" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.613830 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "e7e6199b-1264-4501-8953-767f51328d08" (UID: "e7e6199b-1264-4501-8953-767f51328d08"). InnerVolumeSpecName "kube-api-access". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.614041 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-env-overrides\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.614188 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/36de2843-6491-4c54-b624-c4a3d328c164-cni-binary-copy\") pod \"multus-c6w2q\" (UID: \"36de2843-6491-4c54-b624-c4a3d328c164\") " pod="openshift-multus/multus-c6w2q" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.614246 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"hostroot\" (UniqueName: \"kubernetes.io/host-path/36de2843-6491-4c54-b624-c4a3d328c164-hostroot\") pod \"multus-c6w2q\" (UID: \"36de2843-6491-4c54-b624-c4a3d328c164\") " pod="openshift-multus/multus-c6w2q" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.614369 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7" (OuterVolumeSpecName: "kube-api-access-kfwg7") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "kube-api-access-kfwg7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.614373 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"iptables-alerter-script\" (UniqueName: \"kubernetes.io/configmap/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-iptables-alerter-script\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.614524 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/fa95257d-7464-4038-b2f3-aa795e4ac425-tuning-conf-dir\") pod \"multus-additional-cni-plugins-sr572\" (UID: \"fa95257d-7464-4038-b2f3-aa795e4ac425\") " pod="openshift-multus/multus-additional-cni-plugins-sr572" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.614582 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/36de2843-6491-4c54-b624-c4a3d328c164-host-var-lib-kubelet\") pod \"multus-c6w2q\" (UID: \"36de2843-6491-4c54-b624-c4a3d328c164\") " pod="openshift-multus/multus-c6w2q" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.614602 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/36de2843-6491-4c54-b624-c4a3d328c164-multus-daemon-config\") pod \"multus-c6w2q\" (UID: \"36de2843-6491-4c54-b624-c4a3d328c164\") " pod="openshift-multus/multus-c6w2q" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.614652 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-etc-kube\" (UniqueName: 
\"kubernetes.io/host-path/37a5e44f-9a88-4405-be8a-b645485e7312-host-etc-kube\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.614686 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rootfs\" (UniqueName: \"kubernetes.io/host-path/d4977b14-85c3-4141-9b15-1768f09e8d27-rootfs\") pod \"machine-config-daemon-xr9nd\" (UID: \"d4977b14-85c3-4141-9b15-1768f09e8d27\") " pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.614714 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: \"kubernetes.io/host-path/36de2843-6491-4c54-b624-c4a3d328c164-host-run-k8s-cni-cncf-io\") pod \"multus-c6w2q\" (UID: \"36de2843-6491-4c54-b624-c4a3d328c164\") " pod="openshift-multus/multus-c6w2q" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.614754 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-multus\" (UniqueName: \"kubernetes.io/host-path/36de2843-6491-4c54-b624-c4a3d328c164-host-var-lib-cni-multus\") pod \"multus-c6w2q\" (UID: \"36de2843-6491-4c54-b624-c4a3d328c164\") " pod="openshift-multus/multus-c6w2q" Nov 22 10:40:00 crc kubenswrapper[4926]: E1122 10:40:00.614862 4926 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.614910 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-identity-cm\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-ovnkube-identity-cm\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.614977 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 10:40:00 crc kubenswrapper[4926]: E1122 10:40:00.615144 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-22 10:40:01.115125037 +0000 UTC m=+21.416730324 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.615243 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/36de2843-6491-4c54-b624-c4a3d328c164-cnibin\") pod \"multus-c6w2q\" (UID: \"36de2843-6491-4c54-b624-c4a3d328c164\") " pod="openshift-multus/multus-c6w2q" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.615267 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-cni-dir\" (UniqueName: \"kubernetes.io/host-path/36de2843-6491-4c54-b624-c4a3d328c164-multus-cni-dir\") pod \"multus-c6w2q\" (UID: \"36de2843-6491-4c54-b624-c4a3d328c164\") " pod="openshift-multus/multus-c6w2q" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.615310 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/fa95257d-7464-4038-b2f3-aa795e4ac425-cni-binary-copy\") pod \"multus-additional-cni-plugins-sr572\" (UID: \"fa95257d-7464-4038-b2f3-aa795e4ac425\") " pod="openshift-multus/multus-additional-cni-plugins-sr572" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.615330 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hjkwg\" (UniqueName: \"kubernetes.io/projected/fa95257d-7464-4038-b2f3-aa795e4ac425-kube-api-access-hjkwg\") pod \"multus-additional-cni-plugins-sr572\" (UID: \"fa95257d-7464-4038-b2f3-aa795e4ac425\") " pod="openshift-multus/multus-additional-cni-plugins-sr572" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.615358 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.615378 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-host-slash\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.615410 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/fa95257d-7464-4038-b2f3-aa795e4ac425-system-cni-dir\") pod \"multus-additional-cni-plugins-sr572\" (UID: \"fa95257d-7464-4038-b2f3-aa795e4ac425\") " pod="openshift-multus/multus-additional-cni-plugins-sr572" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.617603 4926 swap_util.go:74] "error creating dir to test if tmpfs noswap is enabled. 
Assuming not supported" mount path="" error="stat /var/lib/kubelet/plugins/kubernetes.io/empty-dir: no such file or directory" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.618298 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-52vzl\" (UniqueName: \"kubernetes.io/projected/36de2843-6491-4c54-b624-c4a3d328c164-kube-api-access-52vzl\") pod \"multus-c6w2q\" (UID: \"36de2843-6491-4c54-b624-c4a3d328c164\") " pod="openshift-multus/multus-c6w2q" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.618356 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4rrk8\" (UniqueName: \"kubernetes.io/projected/385427b5-fb45-42dd-8ff8-a4c7cdad6157-kube-api-access-4rrk8\") pod \"node-resolver-wqf9b\" (UID: \"385427b5-fb45-42dd-8ff8-a4c7cdad6157\") " pod="openshift-dns/node-resolver-wqf9b" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.618382 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/fa95257d-7464-4038-b2f3-aa795e4ac425-cnibin\") pod \"multus-additional-cni-plugins-sr572\" (UID: \"fa95257d-7464-4038-b2f3-aa795e4ac425\") " pod="openshift-multus/multus-additional-cni-plugins-sr572" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.619358 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.619366 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6" (OuterVolumeSpecName: "kube-api-access-htfz6") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "kube-api-access-htfz6". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.619230 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert" (OuterVolumeSpecName: "oauth-serving-cert") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "oauth-serving-cert". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.619558 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6402fda4-df10-493c-b4e5-d0569419652d" path="/var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes" Nov 22 10:40:00 crc kubenswrapper[4926]: E1122 10:40:00.619712 4926 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 22 10:40:00 crc kubenswrapper[4926]: E1122 10:40:00.619846 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. 
No retries permitted until 2025-11-22 10:40:01.119803946 +0000 UTC m=+21.421409313 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.619975 4926 reconciler_common.go:293] "Volume detached for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config\") on node \"crc\" DevicePath \"\"" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.620002 4926 reconciler_common.go:293] "Volume detached for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key\") on node \"crc\" DevicePath \"\"" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.620018 4926 reconciler_common.go:293] "Volume detached for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy\") on node \"crc\" DevicePath \"\"" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.620033 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pj782\" (UniqueName: \"kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782\") on node \"crc\" DevicePath \"\"" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.620046 4926 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities\") on node \"crc\" DevicePath \"\"" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.620059 4926 reconciler_common.go:293] "Volume detached for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config\") on node \"crc\" DevicePath \"\"" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.620072 4926 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login\") on node \"crc\" DevicePath \"\"" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.620103 4926 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.620117 4926 reconciler_common.go:293] "Volume detached for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config\") on node \"crc\" DevicePath \"\"" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.620130 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qg5z5\" (UniqueName: \"kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5\") on node \"crc\" DevicePath \"\"" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.620144 4926 reconciler_common.go:293] "Volume detached for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics\") on node \"crc\" DevicePath \"\"" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 
10:40:00.620157 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w7l8j\" (UniqueName: \"kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j\") on node \"crc\" DevicePath \"\"" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.620169 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bf2bz\" (UniqueName: \"kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz\") on node \"crc\" DevicePath \"\"" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.620181 4926 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config\") on node \"crc\" DevicePath \"\"" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.620193 4926 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.620204 4926 reconciler_common.go:293] "Volume detached for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls\") on node \"crc\" DevicePath \"\"" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.620217 4926 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca\") on node \"crc\" DevicePath \"\"" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.620229 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nzwt7\" (UniqueName: \"kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7\") on node \"crc\" DevicePath \"\"" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.620254 4926 reconciler_common.go:293] "Volume detached for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.620266 4926 reconciler_common.go:293] "Volume detached for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides\") on node \"crc\" DevicePath \"\"" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.620279 4926 reconciler_common.go:293] "Volume detached for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client\") on node \"crc\" DevicePath \"\"" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.620293 4926 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config\") on node \"crc\" DevicePath \"\"" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.620305 4926 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities\") on node \"crc\" DevicePath \"\"" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.620317 4926 reconciler_common.go:293] "Volume detached for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.620329 4926 reconciler_common.go:293] "Volume detached for volume 
\"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session\") on node \"crc\" DevicePath \"\"" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.620340 4926 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config\") on node \"crc\" DevicePath \"\"" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.620351 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w9rds\" (UniqueName: \"kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds\") on node \"crc\" DevicePath \"\"" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.620400 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rnphk\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk\") on node \"crc\" DevicePath \"\"" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.620424 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zkvpv\" (UniqueName: \"kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv\") on node \"crc\" DevicePath \"\"" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.620437 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w4xd4\" (UniqueName: \"kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4\") on node \"crc\" DevicePath \"\"" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.620449 4926 reconciler_common.go:293] "Volume detached for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.620461 4926 reconciler_common.go:293] "Volume detached for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls\") on node \"crc\" DevicePath \"\"" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.620475 4926 reconciler_common.go:293] "Volume detached for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert\") on node \"crc\" DevicePath \"\"" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.620487 4926 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config\") on node \"crc\" DevicePath \"\"" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.620499 4926 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig\") on node \"crc\" DevicePath \"\"" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.620511 4926 reconciler_common.go:293] "Volume detached for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client\") on node \"crc\" DevicePath \"\"" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.620523 4926 reconciler_common.go:293] "Volume detached for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies\") on node \"crc\" DevicePath \"\"" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.620537 4926 reconciler_common.go:293] "Volume detached for 
volume \"kube-api-access-s4n52\" (UniqueName: \"kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52\") on node \"crc\" DevicePath \"\"" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.620549 4926 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.620562 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-v47cf\" (UniqueName: \"kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf\") on node \"crc\" DevicePath \"\"" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.620585 4926 reconciler_common.go:293] "Volume detached for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config\") on node \"crc\" DevicePath \"\"" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.620597 4926 reconciler_common.go:293] "Volume detached for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca\") on node \"crc\" DevicePath \"\"" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.620608 4926 reconciler_common.go:293] "Volume detached for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca\") on node \"crc\" DevicePath \"\"" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.620619 4926 reconciler_common.go:293] "Volume detached for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib\") on node \"crc\" DevicePath \"\"" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.620630 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xcgwh\" (UniqueName: \"kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh\") on node \"crc\" DevicePath \"\"" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.620641 4926 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.620652 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access\") on node \"crc\" DevicePath \"\"" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.620663 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access\") on node \"crc\" DevicePath \"\"" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.620677 4926 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config\") on node \"crc\" DevicePath \"\"" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.620688 4926 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca\") on node \"crc\" DevicePath \"\"" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.620699 4926 reconciler_common.go:293] "Volume detached for volume \"certs\" (UniqueName: 
\"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs\") on node \"crc\" DevicePath \"\"" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.620709 4926 reconciler_common.go:293] "Volume detached for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config\") on node \"crc\" DevicePath \"\"" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.620724 4926 reconciler_common.go:293] "Volume detached for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls\") on node \"crc\" DevicePath \"\"" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.620736 4926 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config\") on node \"crc\" DevicePath \"\"" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.620749 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2d4wz\" (UniqueName: \"kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz\") on node \"crc\" DevicePath \"\"" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.620761 4926 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.620935 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x4zgh\" (UniqueName: \"kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh\") on node \"crc\" DevicePath \"\"" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.620959 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d6qdx\" (UniqueName: \"kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx\") on node \"crc\" DevicePath \"\"" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.620972 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9xfj7\" (UniqueName: \"kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7\") on node \"crc\" DevicePath \"\"" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.620982 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kfwg7\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7\") on node \"crc\" DevicePath \"\"" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.620995 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access\") on node \"crc\" DevicePath \"\"" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.621005 4926 reconciler_common.go:293] "Volume detached for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert\") on node \"crc\" DevicePath \"\"" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.621015 4926 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error\") on node \"crc\" DevicePath \"\"" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 
10:40:00.621025 4926 reconciler_common.go:293] "Volume detached for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.621038 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jkwtn\" (UniqueName: \"kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn\") on node \"crc\" DevicePath \"\"" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.621047 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access\") on node \"crc\" DevicePath \"\"" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.621019 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d4977b14-85c3-4141-9b15-1768f09e8d27\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5nwn7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5nwn7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:00Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-xr9nd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.621194 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lz9wn\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn\") on node \"crc\" DevicePath \"\"" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.621206 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dbsvg\" (UniqueName: \"kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg\") on node \"crc\" DevicePath \"\"" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.621216 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x7zkh\" (UniqueName: \"kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh\") on node \"crc\" DevicePath \"\"" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.621252 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pcxfs\" (UniqueName: \"kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs\") on node \"crc\" DevicePath \"\"" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.621263 4926 reconciler_common.go:293] "Volume detached for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token\") on node \"crc\" DevicePath \"\"" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.621274 4926 
reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs\") on node \"crc\" DevicePath \"\"" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.621285 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qs4fp\" (UniqueName: \"kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp\") on node \"crc\" DevicePath \"\"" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.621295 4926 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template\") on node \"crc\" DevicePath \"\"" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.621305 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6ccd8\" (UniqueName: \"kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8\") on node \"crc\" DevicePath \"\"" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.621315 4926 reconciler_common.go:293] "Volume detached for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config\") on node \"crc\" DevicePath \"\"" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.621324 4926 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token\") on node \"crc\" DevicePath \"\"" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.621333 4926 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.621342 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4d4hj\" (UniqueName: \"kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj\") on node \"crc\" DevicePath \"\"" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.621356 4926 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.621368 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cfbct\" (UniqueName: \"kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct\") on node \"crc\" DevicePath \"\"" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.621380 4926 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config\") on node \"crc\" DevicePath \"\"" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.621416 4926 reconciler_common.go:293] "Volume detached for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls\") on node \"crc\" DevicePath \"\"" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.621428 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x2m85\" (UniqueName: \"kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85\") on node \"crc\" 
DevicePath \"\"" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.621441 4926 reconciler_common.go:293] "Volume detached for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert\") on node \"crc\" DevicePath \"\"" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.621450 4926 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca\") on node \"crc\" DevicePath \"\"" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.621461 4926 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.621470 4926 reconciler_common.go:293] "Volume detached for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls\") on node \"crc\" DevicePath \"\"" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.621481 4926 reconciler_common.go:293] "Volume detached for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert\") on node \"crc\" DevicePath \"\"" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.621490 4926 reconciler_common.go:293] "Volume detached for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit\") on node \"crc\" DevicePath \"\"" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.621499 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gf66m\" (UniqueName: \"kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m\") on node \"crc\" DevicePath \"\"" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.621508 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ngvvp\" (UniqueName: \"kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp\") on node \"crc\" DevicePath \"\"" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.621517 4926 reconciler_common.go:293] "Volume detached for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy\") on node \"crc\" DevicePath \"\"" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.621525 4926 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.621533 4926 reconciler_common.go:293] "Volume detached for volume \"images\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images\") on node \"crc\" DevicePath \"\"" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.621541 4926 reconciler_common.go:293] "Volume detached for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides\") on node \"crc\" DevicePath \"\"" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.621573 4926 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config\") on node \"crc\" DevicePath \"\"" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 
10:40:00.621581 4926 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca\") on node \"crc\" DevicePath \"\"" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.621590 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mnrrd\" (UniqueName: \"kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd\") on node \"crc\" DevicePath \"\"" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.621599 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6g6sz\" (UniqueName: \"kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz\") on node \"crc\" DevicePath \"\"" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.621608 4926 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config\") on node \"crc\" DevicePath \"\"" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.621616 4926 reconciler_common.go:293] "Volume detached for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate\") on node \"crc\" DevicePath \"\"" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.621626 4926 reconciler_common.go:293] "Volume detached for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert\") on node \"crc\" DevicePath \"\"" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.621635 4926 reconciler_common.go:293] "Volume detached for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth\") on node \"crc\" DevicePath \"\"" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.621644 4926 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection\") on node \"crc\" DevicePath \"\"" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.621653 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-279lb\" (UniqueName: \"kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb\") on node \"crc\" DevicePath \"\"" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.621662 4926 reconciler_common.go:293] "Volume detached for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.621674 4926 reconciler_common.go:293] "Volume detached for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca\") on node \"crc\" DevicePath \"\"" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.621685 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vt5rc\" (UniqueName: \"kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc\") on node \"crc\" DevicePath \"\"" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.621695 4926 reconciler_common.go:293] "Volume detached for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls\") on node \"crc\" DevicePath \"\"" Nov 22 10:40:00 crc 
kubenswrapper[4926]: I1122 10:40:00.621705 4926 reconciler_common.go:293] "Volume detached for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls\") on node \"crc\" DevicePath \"\"" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.621737 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xcphl\" (UniqueName: \"kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl\") on node \"crc\" DevicePath \"\"" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.621756 4926 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config\") on node \"crc\" DevicePath \"\"" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.621767 4926 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.621775 4926 reconciler_common.go:293] "Volume detached for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca\") on node \"crc\" DevicePath \"\"" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.621785 4926 reconciler_common.go:293] "Volume detached for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies\") on node \"crc\" DevicePath \"\"" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.621795 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mg5zb\" (UniqueName: \"kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb\") on node \"crc\" DevicePath \"\"" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.621804 4926 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume\") on node \"crc\" DevicePath \"\"" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.621812 4926 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca\") on node \"crc\" DevicePath \"\"" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.621819 4926 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config\") on node \"crc\" DevicePath \"\"" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.621827 4926 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.621835 4926 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.621842 4926 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.621850 4926 reconciler_common.go:293] "Volume detached for volume \"cert\" 
(UniqueName: \"kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert\") on node \"crc\" DevicePath \"\"" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.621859 4926 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.621867 4926 reconciler_common.go:293] "Volume detached for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert\") on node \"crc\" DevicePath \"\"" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.622259 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7c4vf\" (UniqueName: \"kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf\") on node \"crc\" DevicePath \"\"" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.622268 4926 reconciler_common.go:293] "Volume detached for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle\") on node \"crc\" DevicePath \"\"" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.622278 4926 reconciler_common.go:293] "Volume detached for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca\") on node \"crc\" DevicePath \"\"" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.622286 4926 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.622295 4926 reconciler_common.go:293] "Volume detached for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.622324 4926 reconciler_common.go:293] "Volume detached for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert\") on node \"crc\" DevicePath \"\"" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.622333 4926 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config\") on node \"crc\" DevicePath \"\"" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.622341 4926 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca\") on node \"crc\" DevicePath \"\"" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.622353 4926 reconciler_common.go:293] "Volume detached for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs\") on node \"crc\" DevicePath \"\"" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.622364 4926 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.622375 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-249nr\" (UniqueName: \"kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr\") on node 
\"crc\" DevicePath \"\"" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.622428 4926 reconciler_common.go:293] "Volume detached for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls\") on node \"crc\" DevicePath \"\"" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.622446 4926 reconciler_common.go:293] "Volume detached for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert\") on node \"crc\" DevicePath \"\"" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.622459 4926 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.622494 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fqsjt\" (UniqueName: \"kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt\") on node \"crc\" DevicePath \"\"" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.622504 4926 reconciler_common.go:293] "Volume detached for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist\") on node \"crc\" DevicePath \"\"" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.622512 4926 reconciler_common.go:293] "Volume detached for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client\") on node \"crc\" DevicePath \"\"" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.622520 4926 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config\") on node \"crc\" DevicePath \"\"" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.622529 4926 reconciler_common.go:293] "Volume detached for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates\") on node \"crc\" DevicePath \"\"" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.622538 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pjr6v\" (UniqueName: \"kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v\") on node \"crc\" DevicePath \"\"" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.622546 4926 reconciler_common.go:293] "Volume detached for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.622589 4926 reconciler_common.go:293] "Volume detached for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets\") on node \"crc\" DevicePath \"\"" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.622598 4926 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.622608 4926 reconciler_common.go:293] "Volume detached for volume \"etcd-serving-ca\" (UniqueName: 
\"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca\") on node \"crc\" DevicePath \"\"" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.622617 4926 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca\") on node \"crc\" DevicePath \"\"" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.622625 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8tdtz\" (UniqueName: \"kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz\") on node \"crc\" DevicePath \"\"" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.622656 4926 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca\") on node \"crc\" DevicePath \"\"" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.622665 4926 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.622673 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tk88c\" (UniqueName: \"kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c\") on node \"crc\" DevicePath \"\"" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.622682 4926 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities\") on node \"crc\" DevicePath \"\"" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.622690 4926 reconciler_common.go:293] "Volume detached for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs\") on node \"crc\" DevicePath \"\"" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.622698 4926 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data\") on node \"crc\" DevicePath \"\"" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.622708 4926 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config\") on node \"crc\" DevicePath \"\"" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.622749 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zgdk5\" (UniqueName: \"kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5\") on node \"crc\" DevicePath \"\"" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.622758 4926 reconciler_common.go:293] "Volume detached for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca\") on node \"crc\" DevicePath \"\"" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.622766 4926 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities\") on node \"crc\" DevicePath \"\"" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.622775 4926 reconciler_common.go:293] "Volume detached for volume \"ovnkube-config\" (UniqueName: 
\"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config\") on node \"crc\" DevicePath \"\"" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.622783 4926 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.622809 4926 reconciler_common.go:293] "Volume detached for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls\") on node \"crc\" DevicePath \"\"" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.622820 4926 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.622829 4926 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.622840 4926 reconciler_common.go:293] "Volume detached for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs\") on node \"crc\" DevicePath \"\"" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.622849 4926 reconciler_common.go:293] "Volume detached for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls\") on node \"crc\" DevicePath \"\"" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.622859 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d4lsv\" (UniqueName: \"kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv\") on node \"crc\" DevicePath \"\"" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.622867 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sb6h7\" (UniqueName: \"kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7\") on node \"crc\" DevicePath \"\"" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.622911 4926 reconciler_common.go:293] "Volume detached for volume \"images\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images\") on node \"crc\" DevicePath \"\"" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.622920 4926 reconciler_common.go:293] "Volume detached for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls\") on node \"crc\" DevicePath \"\"" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.622928 4926 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token\") on node \"crc\" DevicePath \"\"" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.622940 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jhbk2\" (UniqueName: \"kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2\") on node \"crc\" DevicePath \"\"" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.622948 4926 reconciler_common.go:293] "Volume detached for volume \"webhook-certs\" (UniqueName: 
\"kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs\") on node \"crc\" DevicePath \"\"" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.622958 4926 reconciler_common.go:293] "Volume detached for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.622985 4926 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config\") on node \"crc\" DevicePath \"\"" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.622994 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wxkg8\" (UniqueName: \"kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8\") on node \"crc\" DevicePath \"\"" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.623002 4926 reconciler_common.go:293] "Volume detached for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls\") on node \"crc\" DevicePath \"\"" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.623010 4926 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token\") on node \"crc\" DevicePath \"\"" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.623019 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lzf88\" (UniqueName: \"kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88\") on node \"crc\" DevicePath \"\"" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.623252 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates" (OuterVolumeSpecName: "registry-certificates") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "registry-certificates". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.628231 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6509e943-70c6-444c-bc41-48a544e36fbd" path="/var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.630118 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp" (OuterVolumeSpecName: "kube-api-access-fcqwp") pod "5fe579f8-e8a6-4643-bce5-a661393c4dde" (UID: "5fe579f8-e8a6-4643-bce5-a661393c4dde"). InnerVolumeSpecName "kube-api-access-fcqwp". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.632258 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "proxy-ca-bundles". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:40:00 crc kubenswrapper[4926]: E1122 10:40:00.634316 4926 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 22 10:40:00 crc kubenswrapper[4926]: E1122 10:40:00.634355 4926 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 22 10:40:00 crc kubenswrapper[4926]: E1122 10:40:00.634371 4926 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 22 10:40:00 crc kubenswrapper[4926]: E1122 10:40:00.634465 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-22 10:40:01.13443031 +0000 UTC m=+21.436035597 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 22 10:40:00 crc kubenswrapper[4926]: E1122 10:40:00.634550 4926 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 22 10:40:00 crc kubenswrapper[4926]: E1122 10:40:00.634562 4926 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 22 10:40:00 crc kubenswrapper[4926]: E1122 10:40:00.634571 4926 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 22 10:40:00 crc kubenswrapper[4926]: E1122 10:40:00.634605 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-22 10:40:01.134596835 +0000 UTC m=+21.436202342 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.634651 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh" (OuterVolumeSpecName: "kube-api-access-2w9zh") pod "4bb40260-dbaa-4fb0-84df-5e680505d512" (UID: "4bb40260-dbaa-4fb0-84df-5e680505d512"). InnerVolumeSpecName "kube-api-access-2w9zh". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.635975 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/ef543e1b-8068-4ea3-b32a-61027b32e95d-webhook-cert\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.637773 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s2kz5\" (UniqueName: \"kubernetes.io/projected/ef543e1b-8068-4ea3-b32a-61027b32e95d-kube-api-access-s2kz5\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.640363 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/37a5e44f-9a88-4405-be8a-b645485e7312-metrics-tls\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.640587 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rdwmf\" (UniqueName: \"kubernetes.io/projected/37a5e44f-9a88-4405-be8a-b645485e7312-kube-api-access-rdwmf\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.641160 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "trusted-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.641405 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-sr572" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fa95257d-7464-4038-b2f3-aa795e4ac425\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":fals
e,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":
\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:00Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-sr572\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.642735 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "1d611f23-29be-4491-8495-bee1670e935f" (UID: "1d611f23-29be-4491-8495-bee1670e935f"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.645606 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6731426b-95fe-49ff-bb5f-40441049fde2" path="/var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/volumes" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.646238 4926 kubelet_volumes.go:152] "Cleaned up orphaned volume subpath from pod" podUID="6ea678ab-3438-413e-bfe3-290ae7725660" path="/var/lib/kubelet/pods/6ea678ab-3438-413e-bfe3-290ae7725660/volume-subpaths/run-systemd/ovnkube-controller/6" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.647740 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "57a731c4-ef35-47a8-b875-bfb08a7f8011" (UID: "57a731c4-ef35-47a8-b875-bfb08a7f8011"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.647946 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rczfb\" (UniqueName: \"kubernetes.io/projected/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-kube-api-access-rczfb\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.648334 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6ea678ab-3438-413e-bfe3-290ae7725660" path="/var/lib/kubelet/pods/6ea678ab-3438-413e-bfe3-290ae7725660/volumes" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.650221 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.650656 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7539238d-5fe0-46ed-884e-1c3b566537ec" path="/var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.654438 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" (UID: "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.656227 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7583ce53-e0fe-4a16-9e4d-50516596a136" path="/var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.656909 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7bb08738-c794-4ee8-9972-3a62ca171029" path="/var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.659152 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="87cf06ed-a83f-41a7-828d-70653580a8cb" path="/var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.660012 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted" (OuterVolumeSpecName: "ca-trust-extracted") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "ca-trust-extracted". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.660076 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" path="/var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.661387 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="925f1c65-6136-48ba-85aa-3a3b50560753" path="/var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.662694 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.662957 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="96b93a3a-6083-4aea-8eab-fe1aa8245ad9" path="/var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/volumes" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.664367 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9d4552c7-cd75-42dd-8880-30dd377c49a4" path="/var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.665213 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a0128f3a-b052-44ed-a84e-c4c8aaf17c13" path="/var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/volumes" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.667405 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a31745f5-9847-4afe-82a5-3161cc66ca93" path="/var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.668168 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" path="/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.669932 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b6312bbd-5731-4ea0-a20f-81d5a57df44a" path="/var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/volumes" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.670805 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: 
[iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.671059 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" path="/var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.672212 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" path="/var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.672838 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bd23aa5c-e532-4e53-bccf-e79f130c5ae8" path="/var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/volumes" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.674058 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bf126b07-da06-4140-9a57-dfd54fc6b486" path="/var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.674545 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c03ee662-fb2f-4fc4-a2c1-af487c19d254" path="/var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.675034 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d" path="/var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/volumes" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.676038 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e7e6199b-1264-4501-8953-767f51328d08" path="/var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.676610 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="efdd0498-1daa-4136-9a4a-3b948c2293fc" path="/var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/volumes" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 
10:40:00.677395 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "5225d0e4-402f-4861-b410-819f433b1803" (UID: "5225d0e4-402f-4861-b410-819f433b1803"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.677698 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" path="/var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/volumes" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.678184 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fda69060-fa79-4696-b1a6-7980f124bf7c" path="/var/lib/kubelet/pods/fda69060-fa79-4696-b1a6-7980f124bf7c/volumes" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.681273 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-c6w2q" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"36de2843-6491-4c54-b624-c4a3d328c164\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-52vzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:00Z\\\"}}\" for pod \"openshift-multus\"/\"multus-c6w2q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.690730 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.692482 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.692512 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.692521 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.692557 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.692568 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:00Z","lastTransitionTime":"2025-11-22T10:40:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.698841 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.705512 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-wqf9b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"385427b5-fb45-42dd-8ff8-a4c7cdad6157\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: 
[dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4rrk8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:00Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-wqf9b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.714002 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.720604 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d4977b14-85c3-4141-9b15-1768f09e8d27\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5nwn7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5nwn7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:00Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-xr9nd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.723788 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/fa95257d-7464-4038-b2f3-aa795e4ac425-system-cni-dir\") pod \"multus-additional-cni-plugins-sr572\" (UID: \"fa95257d-7464-4038-b2f3-aa795e4ac425\") " pod="openshift-multus/multus-additional-cni-plugins-sr572" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.723826 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-52vzl\" (UniqueName: \"kubernetes.io/projected/36de2843-6491-4c54-b624-c4a3d328c164-kube-api-access-52vzl\") pod \"multus-c6w2q\" (UID: \"36de2843-6491-4c54-b624-c4a3d328c164\") " pod="openshift-multus/multus-c6w2q" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.723842 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4rrk8\" (UniqueName: \"kubernetes.io/projected/385427b5-fb45-42dd-8ff8-a4c7cdad6157-kube-api-access-4rrk8\") pod \"node-resolver-wqf9b\" (UID: \"385427b5-fb45-42dd-8ff8-a4c7cdad6157\") " pod="openshift-dns/node-resolver-wqf9b" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.723859 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/fa95257d-7464-4038-b2f3-aa795e4ac425-cnibin\") pod \"multus-additional-cni-plugins-sr572\" (UID: 
\"fa95257d-7464-4038-b2f3-aa795e4ac425\") " pod="openshift-multus/multus-additional-cni-plugins-sr572" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.723898 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/fa95257d-7464-4038-b2f3-aa795e4ac425-cni-sysctl-allowlist\") pod \"multus-additional-cni-plugins-sr572\" (UID: \"fa95257d-7464-4038-b2f3-aa795e4ac425\") " pod="openshift-multus/multus-additional-cni-plugins-sr572" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.723916 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/d4977b14-85c3-4141-9b15-1768f09e8d27-proxy-tls\") pod \"machine-config-daemon-xr9nd\" (UID: \"d4977b14-85c3-4141-9b15-1768f09e8d27\") " pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.723941 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/36de2843-6491-4c54-b624-c4a3d328c164-host-run-netns\") pod \"multus-c6w2q\" (UID: \"36de2843-6491-4c54-b624-c4a3d328c164\") " pod="openshift-multus/multus-c6w2q" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.723958 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/36de2843-6491-4c54-b624-c4a3d328c164-os-release\") pod \"multus-c6w2q\" (UID: \"36de2843-6491-4c54-b624-c4a3d328c164\") " pod="openshift-multus/multus-c6w2q" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.723976 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-conf-dir\" (UniqueName: \"kubernetes.io/host-path/36de2843-6491-4c54-b624-c4a3d328c164-multus-conf-dir\") pod \"multus-c6w2q\" (UID: \"36de2843-6491-4c54-b624-c4a3d328c164\") " pod="openshift-multus/multus-c6w2q" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.723990 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-multus-certs\" (UniqueName: \"kubernetes.io/host-path/36de2843-6491-4c54-b624-c4a3d328c164-host-run-multus-certs\") pod \"multus-c6w2q\" (UID: \"36de2843-6491-4c54-b624-c4a3d328c164\") " pod="openshift-multus/multus-c6w2q" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.724005 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-kubernetes\" (UniqueName: \"kubernetes.io/host-path/36de2843-6491-4c54-b624-c4a3d328c164-etc-kubernetes\") pod \"multus-c6w2q\" (UID: \"36de2843-6491-4c54-b624-c4a3d328c164\") " pod="openshift-multus/multus-c6w2q" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.724020 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"hosts-file\" (UniqueName: \"kubernetes.io/host-path/385427b5-fb45-42dd-8ff8-a4c7cdad6157-hosts-file\") pod \"node-resolver-wqf9b\" (UID: \"385427b5-fb45-42dd-8ff8-a4c7cdad6157\") " pod="openshift-dns/node-resolver-wqf9b" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.724035 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-bin\" (UniqueName: \"kubernetes.io/host-path/36de2843-6491-4c54-b624-c4a3d328c164-host-var-lib-cni-bin\") pod \"multus-c6w2q\" (UID: \"36de2843-6491-4c54-b624-c4a3d328c164\") " pod="openshift-multus/multus-c6w2q" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 
10:40:00.724049 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/fa95257d-7464-4038-b2f3-aa795e4ac425-os-release\") pod \"multus-additional-cni-plugins-sr572\" (UID: \"fa95257d-7464-4038-b2f3-aa795e4ac425\") " pod="openshift-multus/multus-additional-cni-plugins-sr572" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.724072 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-socket-dir-parent\" (UniqueName: \"kubernetes.io/host-path/36de2843-6491-4c54-b624-c4a3d328c164-multus-socket-dir-parent\") pod \"multus-c6w2q\" (UID: \"36de2843-6491-4c54-b624-c4a3d328c164\") " pod="openshift-multus/multus-c6w2q" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.724087 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5nwn7\" (UniqueName: \"kubernetes.io/projected/d4977b14-85c3-4141-9b15-1768f09e8d27-kube-api-access-5nwn7\") pod \"machine-config-daemon-xr9nd\" (UID: \"d4977b14-85c3-4141-9b15-1768f09e8d27\") " pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.724101 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/d4977b14-85c3-4141-9b15-1768f09e8d27-mcd-auth-proxy-config\") pod \"machine-config-daemon-xr9nd\" (UID: \"d4977b14-85c3-4141-9b15-1768f09e8d27\") " pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.724123 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/36de2843-6491-4c54-b624-c4a3d328c164-system-cni-dir\") pod \"multus-c6w2q\" (UID: \"36de2843-6491-4c54-b624-c4a3d328c164\") " pod="openshift-multus/multus-c6w2q" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.724139 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/36de2843-6491-4c54-b624-c4a3d328c164-cni-binary-copy\") pod \"multus-c6w2q\" (UID: \"36de2843-6491-4c54-b624-c4a3d328c164\") " pod="openshift-multus/multus-c6w2q" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.724152 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"hostroot\" (UniqueName: \"kubernetes.io/host-path/36de2843-6491-4c54-b624-c4a3d328c164-hostroot\") pod \"multus-c6w2q\" (UID: \"36de2843-6491-4c54-b624-c4a3d328c164\") " pod="openshift-multus/multus-c6w2q" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.724166 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/fa95257d-7464-4038-b2f3-aa795e4ac425-tuning-conf-dir\") pod \"multus-additional-cni-plugins-sr572\" (UID: \"fa95257d-7464-4038-b2f3-aa795e4ac425\") " pod="openshift-multus/multus-additional-cni-plugins-sr572" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.724189 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/36de2843-6491-4c54-b624-c4a3d328c164-host-var-lib-kubelet\") pod \"multus-c6w2q\" (UID: \"36de2843-6491-4c54-b624-c4a3d328c164\") " pod="openshift-multus/multus-c6w2q" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.724207 4926 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/36de2843-6491-4c54-b624-c4a3d328c164-multus-daemon-config\") pod \"multus-c6w2q\" (UID: \"36de2843-6491-4c54-b624-c4a3d328c164\") " pod="openshift-multus/multus-c6w2q" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.724221 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-etc-kube\" (UniqueName: \"kubernetes.io/host-path/37a5e44f-9a88-4405-be8a-b645485e7312-host-etc-kube\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.724236 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rootfs\" (UniqueName: \"kubernetes.io/host-path/d4977b14-85c3-4141-9b15-1768f09e8d27-rootfs\") pod \"machine-config-daemon-xr9nd\" (UID: \"d4977b14-85c3-4141-9b15-1768f09e8d27\") " pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.724251 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: \"kubernetes.io/host-path/36de2843-6491-4c54-b624-c4a3d328c164-host-run-k8s-cni-cncf-io\") pod \"multus-c6w2q\" (UID: \"36de2843-6491-4c54-b624-c4a3d328c164\") " pod="openshift-multus/multus-c6w2q" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.724265 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-multus\" (UniqueName: \"kubernetes.io/host-path/36de2843-6491-4c54-b624-c4a3d328c164-host-var-lib-cni-multus\") pod \"multus-c6w2q\" (UID: \"36de2843-6491-4c54-b624-c4a3d328c164\") " pod="openshift-multus/multus-c6w2q" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.724292 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/36de2843-6491-4c54-b624-c4a3d328c164-cnibin\") pod \"multus-c6w2q\" (UID: \"36de2843-6491-4c54-b624-c4a3d328c164\") " pod="openshift-multus/multus-c6w2q" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.724307 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/fa95257d-7464-4038-b2f3-aa795e4ac425-cni-binary-copy\") pod \"multus-additional-cni-plugins-sr572\" (UID: \"fa95257d-7464-4038-b2f3-aa795e4ac425\") " pod="openshift-multus/multus-additional-cni-plugins-sr572" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.724325 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hjkwg\" (UniqueName: \"kubernetes.io/projected/fa95257d-7464-4038-b2f3-aa795e4ac425-kube-api-access-hjkwg\") pod \"multus-additional-cni-plugins-sr572\" (UID: \"fa95257d-7464-4038-b2f3-aa795e4ac425\") " pod="openshift-multus/multus-additional-cni-plugins-sr572" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.724347 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-host-slash\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.724363 4926 reconciler_common.go:218] "operationExecutor.MountVolume 
started for volume \"multus-cni-dir\" (UniqueName: \"kubernetes.io/host-path/36de2843-6491-4c54-b624-c4a3d328c164-multus-cni-dir\") pod \"multus-c6w2q\" (UID: \"36de2843-6491-4c54-b624-c4a3d328c164\") " pod="openshift-multus/multus-c6w2q" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.724390 4926 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.724400 4926 reconciler_common.go:293] "Volume detached for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted\") on node \"crc\" DevicePath \"\"" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.724410 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2w9zh\" (UniqueName: \"kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh\") on node \"crc\" DevicePath \"\"" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.724419 4926 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca\") on node \"crc\" DevicePath \"\"" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.724431 4926 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.724441 4926 reconciler_common.go:293] "Volume detached for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates\") on node \"crc\" DevicePath \"\"" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.724450 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fcqwp\" (UniqueName: \"kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp\") on node \"crc\" DevicePath \"\"" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.724458 4926 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.724466 4926 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.724474 4926 reconciler_common.go:293] "Volume detached for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.724482 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-htfz6\" (UniqueName: \"kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6\") on node \"crc\" DevicePath \"\"" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.724490 4926 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Nov 22 10:40:00 crc 
kubenswrapper[4926]: I1122 10:40:00.724574 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-cni-dir\" (UniqueName: \"kubernetes.io/host-path/36de2843-6491-4c54-b624-c4a3d328c164-multus-cni-dir\") pod \"multus-c6w2q\" (UID: \"36de2843-6491-4c54-b624-c4a3d328c164\") " pod="openshift-multus/multus-c6w2q" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.724613 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/fa95257d-7464-4038-b2f3-aa795e4ac425-system-cni-dir\") pod \"multus-additional-cni-plugins-sr572\" (UID: \"fa95257d-7464-4038-b2f3-aa795e4ac425\") " pod="openshift-multus/multus-additional-cni-plugins-sr572" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.724998 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/fa95257d-7464-4038-b2f3-aa795e4ac425-cnibin\") pod \"multus-additional-cni-plugins-sr572\" (UID: \"fa95257d-7464-4038-b2f3-aa795e4ac425\") " pod="openshift-multus/multus-additional-cni-plugins-sr572" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.725579 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/fa95257d-7464-4038-b2f3-aa795e4ac425-cni-sysctl-allowlist\") pod \"multus-additional-cni-plugins-sr572\" (UID: \"fa95257d-7464-4038-b2f3-aa795e4ac425\") " pod="openshift-multus/multus-additional-cni-plugins-sr572" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.726615 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"hosts-file\" (UniqueName: \"kubernetes.io/host-path/385427b5-fb45-42dd-8ff8-a4c7cdad6157-hosts-file\") pod \"node-resolver-wqf9b\" (UID: \"385427b5-fb45-42dd-8ff8-a4c7cdad6157\") " pod="openshift-dns/node-resolver-wqf9b" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.726674 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-bin\" (UniqueName: \"kubernetes.io/host-path/36de2843-6491-4c54-b624-c4a3d328c164-host-var-lib-cni-bin\") pod \"multus-c6w2q\" (UID: \"36de2843-6491-4c54-b624-c4a3d328c164\") " pod="openshift-multus/multus-c6w2q" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.726638 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/36de2843-6491-4c54-b624-c4a3d328c164-system-cni-dir\") pod \"multus-c6w2q\" (UID: \"36de2843-6491-4c54-b624-c4a3d328c164\") " pod="openshift-multus/multus-c6w2q" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.726736 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/fa95257d-7464-4038-b2f3-aa795e4ac425-os-release\") pod \"multus-additional-cni-plugins-sr572\" (UID: \"fa95257d-7464-4038-b2f3-aa795e4ac425\") " pod="openshift-multus/multus-additional-cni-plugins-sr572" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.726952 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-etc-kube\" (UniqueName: \"kubernetes.io/host-path/37a5e44f-9a88-4405-be8a-b645485e7312-host-etc-kube\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.727041 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for 
volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/36de2843-6491-4c54-b624-c4a3d328c164-os-release\") pod \"multus-c6w2q\" (UID: \"36de2843-6491-4c54-b624-c4a3d328c164\") " pod="openshift-multus/multus-c6w2q" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.727001 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-socket-dir-parent\" (UniqueName: \"kubernetes.io/host-path/36de2843-6491-4c54-b624-c4a3d328c164-multus-socket-dir-parent\") pod \"multus-c6w2q\" (UID: \"36de2843-6491-4c54-b624-c4a3d328c164\") " pod="openshift-multus/multus-c6w2q" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.726997 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/36de2843-6491-4c54-b624-c4a3d328c164-host-run-netns\") pod \"multus-c6w2q\" (UID: \"36de2843-6491-4c54-b624-c4a3d328c164\") " pod="openshift-multus/multus-c6w2q" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.727108 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-conf-dir\" (UniqueName: \"kubernetes.io/host-path/36de2843-6491-4c54-b624-c4a3d328c164-multus-conf-dir\") pod \"multus-c6w2q\" (UID: \"36de2843-6491-4c54-b624-c4a3d328c164\") " pod="openshift-multus/multus-c6w2q" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.727139 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-multus-certs\" (UniqueName: \"kubernetes.io/host-path/36de2843-6491-4c54-b624-c4a3d328c164-host-run-multus-certs\") pod \"multus-c6w2q\" (UID: \"36de2843-6491-4c54-b624-c4a3d328c164\") " pod="openshift-multus/multus-c6w2q" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.727173 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-kubernetes\" (UniqueName: \"kubernetes.io/host-path/36de2843-6491-4c54-b624-c4a3d328c164-etc-kubernetes\") pod \"multus-c6w2q\" (UID: \"36de2843-6491-4c54-b624-c4a3d328c164\") " pod="openshift-multus/multus-c6w2q" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.727342 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"hostroot\" (UniqueName: \"kubernetes.io/host-path/36de2843-6491-4c54-b624-c4a3d328c164-hostroot\") pod \"multus-c6w2q\" (UID: \"36de2843-6491-4c54-b624-c4a3d328c164\") " pod="openshift-multus/multus-c6w2q" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.727758 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/d4977b14-85c3-4141-9b15-1768f09e8d27-mcd-auth-proxy-config\") pod \"machine-config-daemon-xr9nd\" (UID: \"d4977b14-85c3-4141-9b15-1768f09e8d27\") " pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.727811 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: \"kubernetes.io/host-path/36de2843-6491-4c54-b624-c4a3d328c164-host-run-k8s-cni-cncf-io\") pod \"multus-c6w2q\" (UID: \"36de2843-6491-4c54-b624-c4a3d328c164\") " pod="openshift-multus/multus-c6w2q" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.727830 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/36de2843-6491-4c54-b624-c4a3d328c164-cnibin\") pod \"multus-c6w2q\" (UID: \"36de2843-6491-4c54-b624-c4a3d328c164\") " pod="openshift-multus/multus-c6w2q" Nov 22 10:40:00 crc kubenswrapper[4926]: 
I1122 10:40:00.727849 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rootfs\" (UniqueName: \"kubernetes.io/host-path/d4977b14-85c3-4141-9b15-1768f09e8d27-rootfs\") pod \"machine-config-daemon-xr9nd\" (UID: \"d4977b14-85c3-4141-9b15-1768f09e8d27\") " pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.727873 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-multus\" (UniqueName: \"kubernetes.io/host-path/36de2843-6491-4c54-b624-c4a3d328c164-host-var-lib-cni-multus\") pod \"multus-c6w2q\" (UID: \"36de2843-6491-4c54-b624-c4a3d328c164\") " pod="openshift-multus/multus-c6w2q" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.727915 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/36de2843-6491-4c54-b624-c4a3d328c164-host-var-lib-kubelet\") pod \"multus-c6w2q\" (UID: \"36de2843-6491-4c54-b624-c4a3d328c164\") " pod="openshift-multus/multus-c6w2q" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.727977 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-host-slash\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.728335 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/36de2843-6491-4c54-b624-c4a3d328c164-multus-daemon-config\") pod \"multus-c6w2q\" (UID: \"36de2843-6491-4c54-b624-c4a3d328c164\") " pod="openshift-multus/multus-c6w2q" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.728533 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/36de2843-6491-4c54-b624-c4a3d328c164-cni-binary-copy\") pod \"multus-c6w2q\" (UID: \"36de2843-6491-4c54-b624-c4a3d328c164\") " pod="openshift-multus/multus-c6w2q" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.728922 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/fa95257d-7464-4038-b2f3-aa795e4ac425-cni-binary-copy\") pod \"multus-additional-cni-plugins-sr572\" (UID: \"fa95257d-7464-4038-b2f3-aa795e4ac425\") " pod="openshift-multus/multus-additional-cni-plugins-sr572" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.729472 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/fa95257d-7464-4038-b2f3-aa795e4ac425-tuning-conf-dir\") pod \"multus-additional-cni-plugins-sr572\" (UID: \"fa95257d-7464-4038-b2f3-aa795e4ac425\") " pod="openshift-multus/multus-additional-cni-plugins-sr572" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.732421 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/d4977b14-85c3-4141-9b15-1768f09e8d27-proxy-tls\") pod \"machine-config-daemon-xr9nd\" (UID: \"d4977b14-85c3-4141-9b15-1768f09e8d27\") " pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.741310 4926 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-multus/multus-additional-cni-plugins-sr572" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fa95257d-7464-4038-b2f3-aa795e4ac425\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",
\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\
\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:00Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-sr572\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.745765 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-52vzl\" (UniqueName: \"kubernetes.io/projected/36de2843-6491-4c54-b624-c4a3d328c164-kube-api-access-52vzl\") pod \"multus-c6w2q\" (UID: \"36de2843-6491-4c54-b624-c4a3d328c164\") " pod="openshift-multus/multus-c6w2q" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.746247 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4rrk8\" (UniqueName: \"kubernetes.io/projected/385427b5-fb45-42dd-8ff8-a4c7cdad6157-kube-api-access-4rrk8\") pod \"node-resolver-wqf9b\" (UID: \"385427b5-fb45-42dd-8ff8-a4c7cdad6157\") " pod="openshift-dns/node-resolver-wqf9b" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.751982 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hjkwg\" (UniqueName: \"kubernetes.io/projected/fa95257d-7464-4038-b2f3-aa795e4ac425-kube-api-access-hjkwg\") pod \"multus-additional-cni-plugins-sr572\" (UID: \"fa95257d-7464-4038-b2f3-aa795e4ac425\") " pod="openshift-multus/multus-additional-cni-plugins-sr572" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.752332 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5nwn7\" (UniqueName: \"kubernetes.io/projected/d4977b14-85c3-4141-9b15-1768f09e8d27-kube-api-access-5nwn7\") pod \"machine-config-daemon-xr9nd\" (UID: \"d4977b14-85c3-4141-9b15-1768f09e8d27\") " pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.757207 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.774182 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.785941 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.794925 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.794966 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.794975 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.794988 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.795005 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:00Z","lastTransitionTime":"2025-11-22T10:40:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.800111 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-c6w2q" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"36de2843-6491-4c54-b624-c4a3d328c164\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-52vzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\
\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:00Z\\\"}}\" for pod \"openshift-multus\"/\"multus-c6w2q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.818223 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/node-resolver-wqf9b" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.826581 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.834326 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 22 10:40:00 crc kubenswrapper[4926]: W1122 10:40:00.840978 4926 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod37a5e44f_9a88_4405_be8a_b645485e7312.slice/crio-f728fc0240256145a0ef3175eae41dc53a98dac3ee9966b63424343d6a64393b WatchSource:0}: Error finding container f728fc0240256145a0ef3175eae41dc53a98dac3ee9966b63424343d6a64393b: Status 404 returned error can't find the container with id f728fc0240256145a0ef3175eae41dc53a98dac3ee9966b63424343d6a64393b Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.843074 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 22 10:40:00 crc kubenswrapper[4926]: W1122 10:40:00.847644 4926 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podef543e1b_8068_4ea3_b32a_61027b32e95d.slice/crio-9476b7693e66de33784d24199f63bcc8f929bd1195d47bc16fe0c762622c60a7 WatchSource:0}: Error finding container 9476b7693e66de33784d24199f63bcc8f929bd1195d47bc16fe0c762622c60a7: Status 404 returned error can't find the container with id 9476b7693e66de33784d24199f63bcc8f929bd1195d47bc16fe0c762622c60a7 Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.854728 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.861447 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-additional-cni-plugins-sr572" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.868275 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-c6w2q" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.876790 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-z69nr"] Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.877528 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-z69nr" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.879013 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"openshift-service-ca.crt" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.879568 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-node-dockercfg-pwtwl" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.879925 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-config" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.880922 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-script-lib" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.881078 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"env-overrides" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.882020 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"kube-root-ca.crt" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.882312 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-node-metrics-cert" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.885903 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-wqf9b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"385427b5-fb45-42dd-8ff8-a4c7cdad6157\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: 
[dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4rrk8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:00Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-wqf9b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.895379 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.896790 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.896823 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.896832 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.896847 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.896857 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:00Z","lastTransitionTime":"2025-11-22T10:40:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.905381 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.913778 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.921055 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.928618 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d4977b14-85c3-4141-9b15-1768f09e8d27\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5nwn7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5nwn7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:00Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-xr9nd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.941500 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-sr572" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fa95257d-7464-4038-b2f3-aa795e4ac425\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with 
unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceac
count\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:00Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-sr572\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.960306 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 10:40:00 crc kubenswrapper[4926]: W1122 10:40:00.988194 4926 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd75a4c96_2883_4a0b_bab2_0fab2b6c0b49.slice/crio-304f15e63d7600fe525e4dfcfcd691da02546843f564b4b69487774c44aa33ef WatchSource:0}: Error finding container 304f15e63d7600fe525e4dfcfcd691da02546843f564b4b69487774c44aa33ef: Status 404 returned error can't find the container with id 304f15e63d7600fe525e4dfcfcd691da02546843f564b4b69487774c44aa33ef Nov 22 10:40:00 crc kubenswrapper[4926]: I1122 10:40:00.991023 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-z69nr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"25bc94bb-a5d1-431c-9847-2f6a02997e25\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with incomplete status: [kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:00Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-z69nr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 10:40:01 crc kubenswrapper[4926]: I1122 10:40:01.004263 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:01 crc kubenswrapper[4926]: I1122 10:40:01.004290 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:01 crc kubenswrapper[4926]: I1122 10:40:01.004297 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientPID" Nov 22 10:40:01 crc kubenswrapper[4926]: I1122 10:40:01.004783 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:01 crc kubenswrapper[4926]: I1122 10:40:01.004799 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:01Z","lastTransitionTime":"2025-11-22T10:40:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:01 crc kubenswrapper[4926]: I1122 10:40:01.009572 4926 reflector.go:368] Caches populated for *v1.RuntimeClass from k8s.io/client-go/informers/factory.go:160 Nov 22 10:40:01 crc kubenswrapper[4926]: I1122 10:40:01.015707 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 10:40:01 crc kubenswrapper[4926]: I1122 10:40:01.027354 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-x999w\" (UniqueName: \"kubernetes.io/projected/25bc94bb-a5d1-431c-9847-2f6a02997e25-kube-api-access-x999w\") pod \"ovnkube-node-z69nr\" (UID: \"25bc94bb-a5d1-431c-9847-2f6a02997e25\") " pod="openshift-ovn-kubernetes/ovnkube-node-z69nr" Nov 22 10:40:01 crc kubenswrapper[4926]: I1122 10:40:01.027427 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/25bc94bb-a5d1-431c-9847-2f6a02997e25-host-kubelet\") pod \"ovnkube-node-z69nr\" (UID: \"25bc94bb-a5d1-431c-9847-2f6a02997e25\") " pod="openshift-ovn-kubernetes/ovnkube-node-z69nr" Nov 22 10:40:01 crc kubenswrapper[4926]: I1122 10:40:01.027457 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/25bc94bb-a5d1-431c-9847-2f6a02997e25-var-lib-openvswitch\") pod \"ovnkube-node-z69nr\" (UID: \"25bc94bb-a5d1-431c-9847-2f6a02997e25\") " pod="openshift-ovn-kubernetes/ovnkube-node-z69nr" Nov 22 10:40:01 crc kubenswrapper[4926]: I1122 10:40:01.027489 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/25bc94bb-a5d1-431c-9847-2f6a02997e25-ovn-node-metrics-cert\") pod \"ovnkube-node-z69nr\" (UID: \"25bc94bb-a5d1-431c-9847-2f6a02997e25\") " pod="openshift-ovn-kubernetes/ovnkube-node-z69nr" Nov 22 10:40:01 crc kubenswrapper[4926]: I1122 10:40:01.027527 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/25bc94bb-a5d1-431c-9847-2f6a02997e25-run-ovn\") pod \"ovnkube-node-z69nr\" (UID: \"25bc94bb-a5d1-431c-9847-2f6a02997e25\") " pod="openshift-ovn-kubernetes/ovnkube-node-z69nr" Nov 22 10:40:01 crc kubenswrapper[4926]: I1122 10:40:01.027551 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/25bc94bb-a5d1-431c-9847-2f6a02997e25-ovnkube-script-lib\") pod \"ovnkube-node-z69nr\" (UID: \"25bc94bb-a5d1-431c-9847-2f6a02997e25\") " pod="openshift-ovn-kubernetes/ovnkube-node-z69nr" Nov 22 10:40:01 crc kubenswrapper[4926]: I1122 10:40:01.027577 4926 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/25bc94bb-a5d1-431c-9847-2f6a02997e25-host-slash\") pod \"ovnkube-node-z69nr\" (UID: \"25bc94bb-a5d1-431c-9847-2f6a02997e25\") " pod="openshift-ovn-kubernetes/ovnkube-node-z69nr" Nov 22 10:40:01 crc kubenswrapper[4926]: I1122 10:40:01.027599 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/25bc94bb-a5d1-431c-9847-2f6a02997e25-node-log\") pod \"ovnkube-node-z69nr\" (UID: \"25bc94bb-a5d1-431c-9847-2f6a02997e25\") " pod="openshift-ovn-kubernetes/ovnkube-node-z69nr" Nov 22 10:40:01 crc kubenswrapper[4926]: I1122 10:40:01.027621 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/25bc94bb-a5d1-431c-9847-2f6a02997e25-host-run-netns\") pod \"ovnkube-node-z69nr\" (UID: \"25bc94bb-a5d1-431c-9847-2f6a02997e25\") " pod="openshift-ovn-kubernetes/ovnkube-node-z69nr" Nov 22 10:40:01 crc kubenswrapper[4926]: I1122 10:40:01.027652 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/25bc94bb-a5d1-431c-9847-2f6a02997e25-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-z69nr\" (UID: \"25bc94bb-a5d1-431c-9847-2f6a02997e25\") " pod="openshift-ovn-kubernetes/ovnkube-node-z69nr" Nov 22 10:40:01 crc kubenswrapper[4926]: I1122 10:40:01.027687 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/25bc94bb-a5d1-431c-9847-2f6a02997e25-etc-openvswitch\") pod \"ovnkube-node-z69nr\" (UID: \"25bc94bb-a5d1-431c-9847-2f6a02997e25\") " pod="openshift-ovn-kubernetes/ovnkube-node-z69nr" Nov 22 10:40:01 crc kubenswrapper[4926]: I1122 10:40:01.027708 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/25bc94bb-a5d1-431c-9847-2f6a02997e25-log-socket\") pod \"ovnkube-node-z69nr\" (UID: \"25bc94bb-a5d1-431c-9847-2f6a02997e25\") " pod="openshift-ovn-kubernetes/ovnkube-node-z69nr" Nov 22 10:40:01 crc kubenswrapper[4926]: I1122 10:40:01.027728 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/25bc94bb-a5d1-431c-9847-2f6a02997e25-host-cni-netd\") pod \"ovnkube-node-z69nr\" (UID: \"25bc94bb-a5d1-431c-9847-2f6a02997e25\") " pod="openshift-ovn-kubernetes/ovnkube-node-z69nr" Nov 22 10:40:01 crc kubenswrapper[4926]: I1122 10:40:01.027747 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/25bc94bb-a5d1-431c-9847-2f6a02997e25-env-overrides\") pod \"ovnkube-node-z69nr\" (UID: \"25bc94bb-a5d1-431c-9847-2f6a02997e25\") " pod="openshift-ovn-kubernetes/ovnkube-node-z69nr" Nov 22 10:40:01 crc kubenswrapper[4926]: I1122 10:40:01.027769 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/25bc94bb-a5d1-431c-9847-2f6a02997e25-host-run-ovn-kubernetes\") pod \"ovnkube-node-z69nr\" (UID: \"25bc94bb-a5d1-431c-9847-2f6a02997e25\") " 
pod="openshift-ovn-kubernetes/ovnkube-node-z69nr" Nov 22 10:40:01 crc kubenswrapper[4926]: I1122 10:40:01.027787 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/25bc94bb-a5d1-431c-9847-2f6a02997e25-host-cni-bin\") pod \"ovnkube-node-z69nr\" (UID: \"25bc94bb-a5d1-431c-9847-2f6a02997e25\") " pod="openshift-ovn-kubernetes/ovnkube-node-z69nr" Nov 22 10:40:01 crc kubenswrapper[4926]: I1122 10:40:01.027811 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/25bc94bb-a5d1-431c-9847-2f6a02997e25-systemd-units\") pod \"ovnkube-node-z69nr\" (UID: \"25bc94bb-a5d1-431c-9847-2f6a02997e25\") " pod="openshift-ovn-kubernetes/ovnkube-node-z69nr" Nov 22 10:40:01 crc kubenswrapper[4926]: I1122 10:40:01.027857 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/25bc94bb-a5d1-431c-9847-2f6a02997e25-run-systemd\") pod \"ovnkube-node-z69nr\" (UID: \"25bc94bb-a5d1-431c-9847-2f6a02997e25\") " pod="openshift-ovn-kubernetes/ovnkube-node-z69nr" Nov 22 10:40:01 crc kubenswrapper[4926]: I1122 10:40:01.027991 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/25bc94bb-a5d1-431c-9847-2f6a02997e25-run-openvswitch\") pod \"ovnkube-node-z69nr\" (UID: \"25bc94bb-a5d1-431c-9847-2f6a02997e25\") " pod="openshift-ovn-kubernetes/ovnkube-node-z69nr" Nov 22 10:40:01 crc kubenswrapper[4926]: I1122 10:40:01.028042 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/25bc94bb-a5d1-431c-9847-2f6a02997e25-ovnkube-config\") pod \"ovnkube-node-z69nr\" (UID: \"25bc94bb-a5d1-431c-9847-2f6a02997e25\") " pod="openshift-ovn-kubernetes/ovnkube-node-z69nr" Nov 22 10:40:01 crc kubenswrapper[4926]: I1122 10:40:01.028413 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-c6w2q" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"36de2843-6491-4c54-b624-c4a3d328c164\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-52vzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:00Z\\\"}}\" for pod \"openshift-multus\"/\"multus-c6w2q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 10:40:01 crc kubenswrapper[4926]: W1122 10:40:01.039823 4926 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd4977b14_85c3_4141_9b15_1768f09e8d27.slice/crio-478b6b23db7d819427e39413d67141bc95cae47f8cb3ab8b2a32f6ac8b520edc WatchSource:0}: Error finding container 478b6b23db7d819427e39413d67141bc95cae47f8cb3ab8b2a32f6ac8b520edc: Status 404 returned error can't find the container with id 478b6b23db7d819427e39413d67141bc95cae47f8cb3ab8b2a32f6ac8b520edc Nov 22 10:40:01 crc kubenswrapper[4926]: W1122 10:40:01.052126 4926 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podfa95257d_7464_4038_b2f3_aa795e4ac425.slice/crio-7b02df9eb1ddf6c03a98a9c4bf3faf5f16f14493d0f7bbce246ef2af023c3765 WatchSource:0}: Error finding container 
7b02df9eb1ddf6c03a98a9c4bf3faf5f16f14493d0f7bbce246ef2af023c3765: Status 404 returned error can't find the container with id 7b02df9eb1ddf6c03a98a9c4bf3faf5f16f14493d0f7bbce246ef2af023c3765 Nov 22 10:40:01 crc kubenswrapper[4926]: W1122 10:40:01.052824 4926 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod36de2843_6491_4c54_b624_c4a3d328c164.slice/crio-076cfa7ed469cfee3ab3d96c12d5f4b6dfc2875903108d6ec1ef79c33c56930f WatchSource:0}: Error finding container 076cfa7ed469cfee3ab3d96c12d5f4b6dfc2875903108d6ec1ef79c33c56930f: Status 404 returned error can't find the container with id 076cfa7ed469cfee3ab3d96c12d5f4b6dfc2875903108d6ec1ef79c33c56930f Nov 22 10:40:01 crc kubenswrapper[4926]: I1122 10:40:01.109818 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:01 crc kubenswrapper[4926]: I1122 10:40:01.109981 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:01 crc kubenswrapper[4926]: I1122 10:40:01.110064 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:01 crc kubenswrapper[4926]: I1122 10:40:01.110138 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:01 crc kubenswrapper[4926]: I1122 10:40:01.110208 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:01Z","lastTransitionTime":"2025-11-22T10:40:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:01 crc kubenswrapper[4926]: I1122 10:40:01.128691 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 22 10:40:01 crc kubenswrapper[4926]: I1122 10:40:01.128833 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 10:40:01 crc kubenswrapper[4926]: E1122 10:40:01.128852 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 10:40:02.128829944 +0000 UTC m=+22.430435231 (durationBeforeRetry 1s). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 10:40:01 crc kubenswrapper[4926]: I1122 10:40:01.128882 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/25bc94bb-a5d1-431c-9847-2f6a02997e25-ovn-node-metrics-cert\") pod \"ovnkube-node-z69nr\" (UID: \"25bc94bb-a5d1-431c-9847-2f6a02997e25\") " pod="openshift-ovn-kubernetes/ovnkube-node-z69nr" Nov 22 10:40:01 crc kubenswrapper[4926]: E1122 10:40:01.128937 4926 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 22 10:40:01 crc kubenswrapper[4926]: I1122 10:40:01.128940 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/25bc94bb-a5d1-431c-9847-2f6a02997e25-run-ovn\") pod \"ovnkube-node-z69nr\" (UID: \"25bc94bb-a5d1-431c-9847-2f6a02997e25\") " pod="openshift-ovn-kubernetes/ovnkube-node-z69nr" Nov 22 10:40:01 crc kubenswrapper[4926]: I1122 10:40:01.128965 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/25bc94bb-a5d1-431c-9847-2f6a02997e25-ovnkube-script-lib\") pod \"ovnkube-node-z69nr\" (UID: \"25bc94bb-a5d1-431c-9847-2f6a02997e25\") " pod="openshift-ovn-kubernetes/ovnkube-node-z69nr" Nov 22 10:40:01 crc kubenswrapper[4926]: E1122 10:40:01.128981 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-22 10:40:02.128969418 +0000 UTC m=+22.430574705 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 22 10:40:01 crc kubenswrapper[4926]: I1122 10:40:01.128997 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/25bc94bb-a5d1-431c-9847-2f6a02997e25-host-slash\") pod \"ovnkube-node-z69nr\" (UID: \"25bc94bb-a5d1-431c-9847-2f6a02997e25\") " pod="openshift-ovn-kubernetes/ovnkube-node-z69nr" Nov 22 10:40:01 crc kubenswrapper[4926]: I1122 10:40:01.129021 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/25bc94bb-a5d1-431c-9847-2f6a02997e25-node-log\") pod \"ovnkube-node-z69nr\" (UID: \"25bc94bb-a5d1-431c-9847-2f6a02997e25\") " pod="openshift-ovn-kubernetes/ovnkube-node-z69nr" Nov 22 10:40:01 crc kubenswrapper[4926]: I1122 10:40:01.129039 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/25bc94bb-a5d1-431c-9847-2f6a02997e25-host-run-netns\") pod \"ovnkube-node-z69nr\" (UID: \"25bc94bb-a5d1-431c-9847-2f6a02997e25\") " pod="openshift-ovn-kubernetes/ovnkube-node-z69nr" Nov 22 10:40:01 crc kubenswrapper[4926]: I1122 10:40:01.129061 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/25bc94bb-a5d1-431c-9847-2f6a02997e25-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-z69nr\" (UID: \"25bc94bb-a5d1-431c-9847-2f6a02997e25\") " pod="openshift-ovn-kubernetes/ovnkube-node-z69nr" Nov 22 10:40:01 crc kubenswrapper[4926]: I1122 10:40:01.129084 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 10:40:01 crc kubenswrapper[4926]: I1122 10:40:01.129107 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/25bc94bb-a5d1-431c-9847-2f6a02997e25-etc-openvswitch\") pod \"ovnkube-node-z69nr\" (UID: \"25bc94bb-a5d1-431c-9847-2f6a02997e25\") " pod="openshift-ovn-kubernetes/ovnkube-node-z69nr" Nov 22 10:40:01 crc kubenswrapper[4926]: I1122 10:40:01.129107 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/25bc94bb-a5d1-431c-9847-2f6a02997e25-run-ovn\") pod \"ovnkube-node-z69nr\" (UID: \"25bc94bb-a5d1-431c-9847-2f6a02997e25\") " pod="openshift-ovn-kubernetes/ovnkube-node-z69nr" Nov 22 10:40:01 crc kubenswrapper[4926]: I1122 10:40:01.129149 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/25bc94bb-a5d1-431c-9847-2f6a02997e25-log-socket\") pod \"ovnkube-node-z69nr\" (UID: \"25bc94bb-a5d1-431c-9847-2f6a02997e25\") " pod="openshift-ovn-kubernetes/ovnkube-node-z69nr" Nov 22 10:40:01 crc kubenswrapper[4926]: I1122 10:40:01.129125 4926 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/25bc94bb-a5d1-431c-9847-2f6a02997e25-log-socket\") pod \"ovnkube-node-z69nr\" (UID: \"25bc94bb-a5d1-431c-9847-2f6a02997e25\") " pod="openshift-ovn-kubernetes/ovnkube-node-z69nr" Nov 22 10:40:01 crc kubenswrapper[4926]: I1122 10:40:01.129179 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/25bc94bb-a5d1-431c-9847-2f6a02997e25-node-log\") pod \"ovnkube-node-z69nr\" (UID: \"25bc94bb-a5d1-431c-9847-2f6a02997e25\") " pod="openshift-ovn-kubernetes/ovnkube-node-z69nr" Nov 22 10:40:01 crc kubenswrapper[4926]: I1122 10:40:01.129198 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/25bc94bb-a5d1-431c-9847-2f6a02997e25-host-run-netns\") pod \"ovnkube-node-z69nr\" (UID: \"25bc94bb-a5d1-431c-9847-2f6a02997e25\") " pod="openshift-ovn-kubernetes/ovnkube-node-z69nr" Nov 22 10:40:01 crc kubenswrapper[4926]: I1122 10:40:01.129208 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/25bc94bb-a5d1-431c-9847-2f6a02997e25-host-slash\") pod \"ovnkube-node-z69nr\" (UID: \"25bc94bb-a5d1-431c-9847-2f6a02997e25\") " pod="openshift-ovn-kubernetes/ovnkube-node-z69nr" Nov 22 10:40:01 crc kubenswrapper[4926]: I1122 10:40:01.129219 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/25bc94bb-a5d1-431c-9847-2f6a02997e25-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-z69nr\" (UID: \"25bc94bb-a5d1-431c-9847-2f6a02997e25\") " pod="openshift-ovn-kubernetes/ovnkube-node-z69nr" Nov 22 10:40:01 crc kubenswrapper[4926]: I1122 10:40:01.129255 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/25bc94bb-a5d1-431c-9847-2f6a02997e25-etc-openvswitch\") pod \"ovnkube-node-z69nr\" (UID: \"25bc94bb-a5d1-431c-9847-2f6a02997e25\") " pod="openshift-ovn-kubernetes/ovnkube-node-z69nr" Nov 22 10:40:01 crc kubenswrapper[4926]: I1122 10:40:01.129290 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/25bc94bb-a5d1-431c-9847-2f6a02997e25-host-cni-netd\") pod \"ovnkube-node-z69nr\" (UID: \"25bc94bb-a5d1-431c-9847-2f6a02997e25\") " pod="openshift-ovn-kubernetes/ovnkube-node-z69nr" Nov 22 10:40:01 crc kubenswrapper[4926]: I1122 10:40:01.129305 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/25bc94bb-a5d1-431c-9847-2f6a02997e25-env-overrides\") pod \"ovnkube-node-z69nr\" (UID: \"25bc94bb-a5d1-431c-9847-2f6a02997e25\") " pod="openshift-ovn-kubernetes/ovnkube-node-z69nr" Nov 22 10:40:01 crc kubenswrapper[4926]: I1122 10:40:01.129321 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/25bc94bb-a5d1-431c-9847-2f6a02997e25-host-run-ovn-kubernetes\") pod \"ovnkube-node-z69nr\" (UID: \"25bc94bb-a5d1-431c-9847-2f6a02997e25\") " pod="openshift-ovn-kubernetes/ovnkube-node-z69nr" Nov 22 10:40:01 crc kubenswrapper[4926]: I1122 10:40:01.129338 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-bin\" 
(UniqueName: \"kubernetes.io/host-path/25bc94bb-a5d1-431c-9847-2f6a02997e25-host-cni-bin\") pod \"ovnkube-node-z69nr\" (UID: \"25bc94bb-a5d1-431c-9847-2f6a02997e25\") " pod="openshift-ovn-kubernetes/ovnkube-node-z69nr" Nov 22 10:40:01 crc kubenswrapper[4926]: I1122 10:40:01.129342 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/25bc94bb-a5d1-431c-9847-2f6a02997e25-host-cni-netd\") pod \"ovnkube-node-z69nr\" (UID: \"25bc94bb-a5d1-431c-9847-2f6a02997e25\") " pod="openshift-ovn-kubernetes/ovnkube-node-z69nr" Nov 22 10:40:01 crc kubenswrapper[4926]: I1122 10:40:01.129355 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/25bc94bb-a5d1-431c-9847-2f6a02997e25-systemd-units\") pod \"ovnkube-node-z69nr\" (UID: \"25bc94bb-a5d1-431c-9847-2f6a02997e25\") " pod="openshift-ovn-kubernetes/ovnkube-node-z69nr" Nov 22 10:40:01 crc kubenswrapper[4926]: I1122 10:40:01.129371 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/25bc94bb-a5d1-431c-9847-2f6a02997e25-run-systemd\") pod \"ovnkube-node-z69nr\" (UID: \"25bc94bb-a5d1-431c-9847-2f6a02997e25\") " pod="openshift-ovn-kubernetes/ovnkube-node-z69nr" Nov 22 10:40:01 crc kubenswrapper[4926]: I1122 10:40:01.129387 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/25bc94bb-a5d1-431c-9847-2f6a02997e25-run-openvswitch\") pod \"ovnkube-node-z69nr\" (UID: \"25bc94bb-a5d1-431c-9847-2f6a02997e25\") " pod="openshift-ovn-kubernetes/ovnkube-node-z69nr" Nov 22 10:40:01 crc kubenswrapper[4926]: I1122 10:40:01.129404 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/25bc94bb-a5d1-431c-9847-2f6a02997e25-ovnkube-config\") pod \"ovnkube-node-z69nr\" (UID: \"25bc94bb-a5d1-431c-9847-2f6a02997e25\") " pod="openshift-ovn-kubernetes/ovnkube-node-z69nr" Nov 22 10:40:01 crc kubenswrapper[4926]: I1122 10:40:01.129413 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/25bc94bb-a5d1-431c-9847-2f6a02997e25-systemd-units\") pod \"ovnkube-node-z69nr\" (UID: \"25bc94bb-a5d1-431c-9847-2f6a02997e25\") " pod="openshift-ovn-kubernetes/ovnkube-node-z69nr" Nov 22 10:40:01 crc kubenswrapper[4926]: I1122 10:40:01.129421 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-x999w\" (UniqueName: \"kubernetes.io/projected/25bc94bb-a5d1-431c-9847-2f6a02997e25-kube-api-access-x999w\") pod \"ovnkube-node-z69nr\" (UID: \"25bc94bb-a5d1-431c-9847-2f6a02997e25\") " pod="openshift-ovn-kubernetes/ovnkube-node-z69nr" Nov 22 10:40:01 crc kubenswrapper[4926]: I1122 10:40:01.129446 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/25bc94bb-a5d1-431c-9847-2f6a02997e25-host-cni-bin\") pod \"ovnkube-node-z69nr\" (UID: \"25bc94bb-a5d1-431c-9847-2f6a02997e25\") " pod="openshift-ovn-kubernetes/ovnkube-node-z69nr" Nov 22 10:40:01 crc kubenswrapper[4926]: I1122 10:40:01.129451 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/25bc94bb-a5d1-431c-9847-2f6a02997e25-host-kubelet\") pod \"ovnkube-node-z69nr\" (UID: 
\"25bc94bb-a5d1-431c-9847-2f6a02997e25\") " pod="openshift-ovn-kubernetes/ovnkube-node-z69nr" Nov 22 10:40:01 crc kubenswrapper[4926]: I1122 10:40:01.129383 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/25bc94bb-a5d1-431c-9847-2f6a02997e25-host-run-ovn-kubernetes\") pod \"ovnkube-node-z69nr\" (UID: \"25bc94bb-a5d1-431c-9847-2f6a02997e25\") " pod="openshift-ovn-kubernetes/ovnkube-node-z69nr" Nov 22 10:40:01 crc kubenswrapper[4926]: I1122 10:40:01.129471 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/25bc94bb-a5d1-431c-9847-2f6a02997e25-var-lib-openvswitch\") pod \"ovnkube-node-z69nr\" (UID: \"25bc94bb-a5d1-431c-9847-2f6a02997e25\") " pod="openshift-ovn-kubernetes/ovnkube-node-z69nr" Nov 22 10:40:01 crc kubenswrapper[4926]: I1122 10:40:01.129491 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/25bc94bb-a5d1-431c-9847-2f6a02997e25-run-systemd\") pod \"ovnkube-node-z69nr\" (UID: \"25bc94bb-a5d1-431c-9847-2f6a02997e25\") " pod="openshift-ovn-kubernetes/ovnkube-node-z69nr" Nov 22 10:40:01 crc kubenswrapper[4926]: I1122 10:40:01.129508 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/25bc94bb-a5d1-431c-9847-2f6a02997e25-var-lib-openvswitch\") pod \"ovnkube-node-z69nr\" (UID: \"25bc94bb-a5d1-431c-9847-2f6a02997e25\") " pod="openshift-ovn-kubernetes/ovnkube-node-z69nr" Nov 22 10:40:01 crc kubenswrapper[4926]: I1122 10:40:01.129520 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/25bc94bb-a5d1-431c-9847-2f6a02997e25-run-openvswitch\") pod \"ovnkube-node-z69nr\" (UID: \"25bc94bb-a5d1-431c-9847-2f6a02997e25\") " pod="openshift-ovn-kubernetes/ovnkube-node-z69nr" Nov 22 10:40:01 crc kubenswrapper[4926]: I1122 10:40:01.129790 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/25bc94bb-a5d1-431c-9847-2f6a02997e25-ovnkube-script-lib\") pod \"ovnkube-node-z69nr\" (UID: \"25bc94bb-a5d1-431c-9847-2f6a02997e25\") " pod="openshift-ovn-kubernetes/ovnkube-node-z69nr" Nov 22 10:40:01 crc kubenswrapper[4926]: I1122 10:40:01.129813 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/25bc94bb-a5d1-431c-9847-2f6a02997e25-env-overrides\") pod \"ovnkube-node-z69nr\" (UID: \"25bc94bb-a5d1-431c-9847-2f6a02997e25\") " pod="openshift-ovn-kubernetes/ovnkube-node-z69nr" Nov 22 10:40:01 crc kubenswrapper[4926]: I1122 10:40:01.129817 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/25bc94bb-a5d1-431c-9847-2f6a02997e25-host-kubelet\") pod \"ovnkube-node-z69nr\" (UID: \"25bc94bb-a5d1-431c-9847-2f6a02997e25\") " pod="openshift-ovn-kubernetes/ovnkube-node-z69nr" Nov 22 10:40:01 crc kubenswrapper[4926]: E1122 10:40:01.129872 4926 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 22 10:40:01 crc kubenswrapper[4926]: E1122 10:40:01.129943 4926 nestedpendingoperations.go:348] Operation for 
"{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-22 10:40:02.129923272 +0000 UTC m=+22.431528659 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 22 10:40:01 crc kubenswrapper[4926]: I1122 10:40:01.131178 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/25bc94bb-a5d1-431c-9847-2f6a02997e25-ovnkube-config\") pod \"ovnkube-node-z69nr\" (UID: \"25bc94bb-a5d1-431c-9847-2f6a02997e25\") " pod="openshift-ovn-kubernetes/ovnkube-node-z69nr" Nov 22 10:40:01 crc kubenswrapper[4926]: I1122 10:40:01.136397 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/25bc94bb-a5d1-431c-9847-2f6a02997e25-ovn-node-metrics-cert\") pod \"ovnkube-node-z69nr\" (UID: \"25bc94bb-a5d1-431c-9847-2f6a02997e25\") " pod="openshift-ovn-kubernetes/ovnkube-node-z69nr" Nov 22 10:40:01 crc kubenswrapper[4926]: I1122 10:40:01.151090 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-x999w\" (UniqueName: \"kubernetes.io/projected/25bc94bb-a5d1-431c-9847-2f6a02997e25-kube-api-access-x999w\") pod \"ovnkube-node-z69nr\" (UID: \"25bc94bb-a5d1-431c-9847-2f6a02997e25\") " pod="openshift-ovn-kubernetes/ovnkube-node-z69nr" Nov 22 10:40:01 crc kubenswrapper[4926]: I1122 10:40:01.190579 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-z69nr" Nov 22 10:40:01 crc kubenswrapper[4926]: I1122 10:40:01.212971 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:01 crc kubenswrapper[4926]: I1122 10:40:01.213017 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:01 crc kubenswrapper[4926]: I1122 10:40:01.213029 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:01 crc kubenswrapper[4926]: I1122 10:40:01.213045 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:01 crc kubenswrapper[4926]: I1122 10:40:01.213057 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:01Z","lastTransitionTime":"2025-11-22T10:40:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:40:01 crc kubenswrapper[4926]: I1122 10:40:01.230235 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 22 10:40:01 crc kubenswrapper[4926]: I1122 10:40:01.230283 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 22 10:40:01 crc kubenswrapper[4926]: E1122 10:40:01.230399 4926 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 22 10:40:01 crc kubenswrapper[4926]: E1122 10:40:01.230413 4926 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 22 10:40:01 crc kubenswrapper[4926]: E1122 10:40:01.230422 4926 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 22 10:40:01 crc kubenswrapper[4926]: E1122 10:40:01.230466 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-22 10:40:02.230452663 +0000 UTC m=+22.532057950 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 22 10:40:01 crc kubenswrapper[4926]: E1122 10:40:01.230481 4926 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 22 10:40:01 crc kubenswrapper[4926]: E1122 10:40:01.230511 4926 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 22 10:40:01 crc kubenswrapper[4926]: E1122 10:40:01.230521 4926 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 22 10:40:01 crc kubenswrapper[4926]: E1122 10:40:01.230572 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-22 10:40:02.230556246 +0000 UTC m=+22.532161533 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 22 10:40:01 crc kubenswrapper[4926]: I1122 10:40:01.315701 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:01 crc kubenswrapper[4926]: I1122 10:40:01.315750 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:01 crc kubenswrapper[4926]: I1122 10:40:01.315763 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:01 crc kubenswrapper[4926]: I1122 10:40:01.315776 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:01 crc kubenswrapper[4926]: I1122 10:40:01.315786 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:01Z","lastTransitionTime":"2025-11-22T10:40:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:40:01 crc kubenswrapper[4926]: W1122 10:40:01.345161 4926 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod25bc94bb_a5d1_431c_9847_2f6a02997e25.slice/crio-bed6871f225e6e6df17fd36e043a78753caee7423ab367ed98812e6abeb60395 WatchSource:0}: Error finding container bed6871f225e6e6df17fd36e043a78753caee7423ab367ed98812e6abeb60395: Status 404 returned error can't find the container with id bed6871f225e6e6df17fd36e043a78753caee7423ab367ed98812e6abeb60395 Nov 22 10:40:01 crc kubenswrapper[4926]: I1122 10:40:01.418267 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:01 crc kubenswrapper[4926]: I1122 10:40:01.418307 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:01 crc kubenswrapper[4926]: I1122 10:40:01.418318 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:01 crc kubenswrapper[4926]: I1122 10:40:01.418337 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:01 crc kubenswrapper[4926]: I1122 10:40:01.418347 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:01Z","lastTransitionTime":"2025-11-22T10:40:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:01 crc kubenswrapper[4926]: I1122 10:40:01.520814 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:01 crc kubenswrapper[4926]: I1122 10:40:01.520858 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:01 crc kubenswrapper[4926]: I1122 10:40:01.520870 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:01 crc kubenswrapper[4926]: I1122 10:40:01.520905 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:01 crc kubenswrapper[4926]: I1122 10:40:01.520918 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:01Z","lastTransitionTime":"2025-11-22T10:40:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:01 crc kubenswrapper[4926]: I1122 10:40:01.581622 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 22 10:40:01 crc kubenswrapper[4926]: E1122 10:40:01.581734 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 22 10:40:01 crc kubenswrapper[4926]: I1122 10:40:01.625109 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:01 crc kubenswrapper[4926]: I1122 10:40:01.625147 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:01 crc kubenswrapper[4926]: I1122 10:40:01.625158 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:01 crc kubenswrapper[4926]: I1122 10:40:01.625175 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:01 crc kubenswrapper[4926]: I1122 10:40:01.625187 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:01Z","lastTransitionTime":"2025-11-22T10:40:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:01 crc kubenswrapper[4926]: I1122 10:40:01.713295 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" event={"ID":"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49","Type":"ContainerStarted","Data":"304f15e63d7600fe525e4dfcfcd691da02546843f564b4b69487774c44aa33ef"} Nov 22 10:40:01 crc kubenswrapper[4926]: I1122 10:40:01.714851 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-sr572" event={"ID":"fa95257d-7464-4038-b2f3-aa795e4ac425","Type":"ContainerStarted","Data":"0a7d49a66e96b6f535cae7cd657acf64164c67b52df055b133183ef25644863d"} Nov 22 10:40:01 crc kubenswrapper[4926]: I1122 10:40:01.714912 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-sr572" event={"ID":"fa95257d-7464-4038-b2f3-aa795e4ac425","Type":"ContainerStarted","Data":"7b02df9eb1ddf6c03a98a9c4bf3faf5f16f14493d0f7bbce246ef2af023c3765"} Nov 22 10:40:01 crc kubenswrapper[4926]: I1122 10:40:01.717078 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-c6w2q" event={"ID":"36de2843-6491-4c54-b624-c4a3d328c164","Type":"ContainerStarted","Data":"13dff3bd18b5adc45fb92f8207a6726011752f430271dabbae9f2e94011f1b08"} Nov 22 10:40:01 crc kubenswrapper[4926]: I1122 10:40:01.717106 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-c6w2q" event={"ID":"36de2843-6491-4c54-b624-c4a3d328c164","Type":"ContainerStarted","Data":"076cfa7ed469cfee3ab3d96c12d5f4b6dfc2875903108d6ec1ef79c33c56930f"} Nov 22 10:40:01 crc kubenswrapper[4926]: I1122 10:40:01.718780 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" event={"ID":"d4977b14-85c3-4141-9b15-1768f09e8d27","Type":"ContainerStarted","Data":"7f4461b3d09c647d71476b2c089ab47e93fe0e0efb27f8179e476ff3667447f6"} Nov 22 10:40:01 crc kubenswrapper[4926]: I1122 10:40:01.718808 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" 
event={"ID":"d4977b14-85c3-4141-9b15-1768f09e8d27","Type":"ContainerStarted","Data":"fb54559825bae40317f4dbc23c5b4238c4681f9845053656d59b2d2b3e504e1a"} Nov 22 10:40:01 crc kubenswrapper[4926]: I1122 10:40:01.718821 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" event={"ID":"d4977b14-85c3-4141-9b15-1768f09e8d27","Type":"ContainerStarted","Data":"478b6b23db7d819427e39413d67141bc95cae47f8cb3ab8b2a32f6ac8b520edc"} Nov 22 10:40:01 crc kubenswrapper[4926]: I1122 10:40:01.720214 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" event={"ID":"ef543e1b-8068-4ea3-b32a-61027b32e95d","Type":"ContainerStarted","Data":"20ee112074738980326d231b1680aa74c920ec797dbfe4752ebfdfb99695caeb"} Nov 22 10:40:01 crc kubenswrapper[4926]: I1122 10:40:01.720240 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" event={"ID":"ef543e1b-8068-4ea3-b32a-61027b32e95d","Type":"ContainerStarted","Data":"0a5e6c7d3f4dd4bbfdc58c50672c5187d32b136fb9e096c95e786193e1c773b9"} Nov 22 10:40:01 crc kubenswrapper[4926]: I1122 10:40:01.720254 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" event={"ID":"ef543e1b-8068-4ea3-b32a-61027b32e95d","Type":"ContainerStarted","Data":"9476b7693e66de33784d24199f63bcc8f929bd1195d47bc16fe0c762622c60a7"} Nov 22 10:40:01 crc kubenswrapper[4926]: I1122 10:40:01.721532 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" event={"ID":"37a5e44f-9a88-4405-be8a-b645485e7312","Type":"ContainerStarted","Data":"2bff8ee1644ff7cd9bee279e5c641b5d9c55e0546fbc7d1f2330e4e86aee0c37"} Nov 22 10:40:01 crc kubenswrapper[4926]: I1122 10:40:01.721586 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" event={"ID":"37a5e44f-9a88-4405-be8a-b645485e7312","Type":"ContainerStarted","Data":"f728fc0240256145a0ef3175eae41dc53a98dac3ee9966b63424343d6a64393b"} Nov 22 10:40:01 crc kubenswrapper[4926]: I1122 10:40:01.722617 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/node-resolver-wqf9b" event={"ID":"385427b5-fb45-42dd-8ff8-a4c7cdad6157","Type":"ContainerStarted","Data":"06207f52a9095cf1830a6ad91952c7efa774aa15a2fca873dc09b6b995e26401"} Nov 22 10:40:01 crc kubenswrapper[4926]: I1122 10:40:01.722639 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/node-resolver-wqf9b" event={"ID":"385427b5-fb45-42dd-8ff8-a4c7cdad6157","Type":"ContainerStarted","Data":"3783d8b61e8d50beae6256c569d8226363f1a93cd4beb69e825f52d41d7ae8d4"} Nov 22 10:40:01 crc kubenswrapper[4926]: I1122 10:40:01.724016 4926 generic.go:334] "Generic (PLEG): container finished" podID="25bc94bb-a5d1-431c-9847-2f6a02997e25" containerID="fde011e559f073f261a07a3e55c05d88d1b11a16b6858b74cb8ef79b97c266b1" exitCode=0 Nov 22 10:40:01 crc kubenswrapper[4926]: I1122 10:40:01.724043 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-z69nr" event={"ID":"25bc94bb-a5d1-431c-9847-2f6a02997e25","Type":"ContainerDied","Data":"fde011e559f073f261a07a3e55c05d88d1b11a16b6858b74cb8ef79b97c266b1"} Nov 22 10:40:01 crc kubenswrapper[4926]: I1122 10:40:01.724058 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-z69nr" 
event={"ID":"25bc94bb-a5d1-431c-9847-2f6a02997e25","Type":"ContainerStarted","Data":"bed6871f225e6e6df17fd36e043a78753caee7423ab367ed98812e6abeb60395"} Nov 22 10:40:01 crc kubenswrapper[4926]: I1122 10:40:01.727710 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:01 crc kubenswrapper[4926]: I1122 10:40:01.727741 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:01 crc kubenswrapper[4926]: I1122 10:40:01.727751 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:01 crc kubenswrapper[4926]: I1122 10:40:01.727766 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:01 crc kubenswrapper[4926]: I1122 10:40:01.727775 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:01Z","lastTransitionTime":"2025-11-22T10:40:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:01 crc kubenswrapper[4926]: I1122 10:40:01.730166 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 10:40:01 crc kubenswrapper[4926]: I1122 10:40:01.747763 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-z69nr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"25bc94bb-a5d1-431c-9847-2f6a02997e25\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with incomplete status: [kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node 
kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.
io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni
/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:00Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-z69nr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 10:40:01 crc kubenswrapper[4926]: I1122 10:40:01.756820 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 10:40:01 crc kubenswrapper[4926]: I1122 10:40:01.766280 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-c6w2q" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"36de2843-6491-4c54-b624-c4a3d328c164\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-52vzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:00Z\\\"}}\" for pod \"openshift-multus\"/\"multus-c6w2q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 10:40:01 crc kubenswrapper[4926]: I1122 10:40:01.773984 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-wqf9b" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"385427b5-fb45-42dd-8ff8-a4c7cdad6157\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4rrk8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:00Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-wqf9b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 10:40:01 crc kubenswrapper[4926]: I1122 10:40:01.783817 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 10:40:01 crc kubenswrapper[4926]: I1122 10:40:01.791110 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 10:40:01 crc kubenswrapper[4926]: I1122 10:40:01.799027 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 10:40:01 crc kubenswrapper[4926]: I1122 10:40:01.808382 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 10:40:01 crc kubenswrapper[4926]: I1122 10:40:01.815788 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d4977b14-85c3-4141-9b15-1768f09e8d27\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5nwn7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5nwn7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:00Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-xr9nd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 10:40:01 crc kubenswrapper[4926]: I1122 10:40:01.830244 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:01 crc kubenswrapper[4926]: I1122 10:40:01.830282 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:01 crc kubenswrapper[4926]: I1122 10:40:01.830290 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:01 crc kubenswrapper[4926]: I1122 10:40:01.830303 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:01 crc kubenswrapper[4926]: I1122 10:40:01.830312 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:01Z","lastTransitionTime":"2025-11-22T10:40:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:40:01 crc kubenswrapper[4926]: I1122 10:40:01.830702 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-sr572" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fa95257d-7464-4038-b2f3-aa795e4ac425\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0a7d49a66e96b6f535cae7cd657acf64164c67b52df055b133183ef25644863d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/opensh
ift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":tru
e,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:00Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-sr572\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 10:40:01 crc kubenswrapper[4926]: I1122 10:40:01.841963 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://20ee112074738980326d231b1680aa74c920ec797dbfe4752ebfdfb99695caeb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0a5e6c7d3f4dd4bbfdc58c50672c5187d32b136fb9e096c95e786193e1c773b9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\
\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 10:40:01 crc kubenswrapper[4926]: I1122 10:40:01.855160 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-z69nr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"25bc94bb-a5d1-431c-9847-2f6a02997e25\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fde011e559f073f261a07a3e55c05d88d1b11a16b6858b74cb8ef79b97c266b1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fde011e559f073f261a07a3e55c05d88d1b11a16b6858b74cb8ef79b97c266b1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:00Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-z69nr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 10:40:01 crc kubenswrapper[4926]: I1122 10:40:01.866847 
4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 10:40:01 crc kubenswrapper[4926]: I1122 10:40:01.877650 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-c6w2q" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"36de2843-6491-4c54-b624-c4a3d328c164\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://13dff3bd18b5adc45fb92f8207a6726011752f430271dabbae9f2e94011f1b08\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-52vzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:00Z\\\"}}\" for pod \"openshift-multus\"/\"multus-c6w2q\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 10:40:01 crc kubenswrapper[4926]: I1122 10:40:01.888673 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2bff8ee1644ff7cd9bee279e5c641b5d9c55e0546fbc7d1f2330e4e86aee0c37\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 10:40:01 crc kubenswrapper[4926]: I1122 10:40:01.897708 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 10:40:01 crc kubenswrapper[4926]: I1122 10:40:01.906255 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 10:40:01 crc kubenswrapper[4926]: I1122 10:40:01.918646 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-wqf9b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"385427b5-fb45-42dd-8ff8-a4c7cdad6157\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://06207f52a9095cf1830a6ad91952c7efa774aa15a2fca873dc09b6b995e26401\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4rrk8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:00Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-wqf9b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 10:40:01 crc kubenswrapper[4926]: I1122 10:40:01.931260 4926 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 10:40:01 crc kubenswrapper[4926]: I1122 10:40:01.932795 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:01 crc kubenswrapper[4926]: I1122 10:40:01.932840 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:01 crc kubenswrapper[4926]: I1122 10:40:01.932853 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:01 crc kubenswrapper[4926]: I1122 10:40:01.932871 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:01 crc kubenswrapper[4926]: I1122 10:40:01.932900 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:01Z","lastTransitionTime":"2025-11-22T10:40:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:40:01 crc kubenswrapper[4926]: I1122 10:40:01.943736 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d4977b14-85c3-4141-9b15-1768f09e8d27\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7f4461b3d09c647d71476b2c089ab47e93fe0e0efb27f8179e476ff3667447f6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5nwn7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fb54559825bae40317f4dbc23c5b4238c4681f9845053656d59b2d2b3e504e1a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5nwn7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:00Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-xr9nd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 10:40:01 crc kubenswrapper[4926]: I1122 10:40:01.954005 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/node-ca-4sppr"] Nov 22 10:40:01 crc kubenswrapper[4926]: I1122 10:40:01.954386 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/node-ca-4sppr" Nov 22 10:40:01 crc kubenswrapper[4926]: I1122 10:40:01.954620 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-sr572" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fa95257d-7464-4038-b2f3-aa795e4ac425\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0a7d49a66e96b6f535cae7cd657acf64164c67b52df055b133183ef25644863d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"}
,{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:00Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-sr572\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 10:40:01 crc kubenswrapper[4926]: I1122 10:40:01.956461 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"kube-root-ca.crt" Nov 22 10:40:01 crc kubenswrapper[4926]: I1122 10:40:01.957546 4926 reflector.go:368] Caches populated for *v1.ConfigMap from 
object-"openshift-image-registry"/"openshift-service-ca.crt" Nov 22 10:40:01 crc kubenswrapper[4926]: I1122 10:40:01.958049 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"node-ca-dockercfg-4777p" Nov 22 10:40:02 crc kubenswrapper[4926]: I1122 10:40:02.007371 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"image-registry-certificates" Nov 22 10:40:02 crc kubenswrapper[4926]: I1122 10:40:02.009862 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 10:40:02 crc kubenswrapper[4926]: I1122 10:40:02.024225 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-c6w2q" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"36de2843-6491-4c54-b624-c4a3d328c164\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://13dff3bd18b5adc45fb92f8207a6726011752f430271dabbae9f2e94011f1b08\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni
/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-52vzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:00Z\\\"}}\" for pod \"openshift-multus\"/\"multus-c6w2q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 10:40:02 crc kubenswrapper[4926]: I1122 10:40:02.034113 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-wqf9b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"385427b5-fb45-42dd-8ff8-a4c7cdad6157\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://06207f52a9095cf1830a6ad91952c7efa774aa15a2fca873dc09b6b995e26401\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4rrk8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\
\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:00Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-wqf9b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 10:40:02 crc kubenswrapper[4926]: I1122 10:40:02.035499 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:02 crc kubenswrapper[4926]: I1122 10:40:02.035550 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:02 crc kubenswrapper[4926]: I1122 10:40:02.035565 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:02 crc kubenswrapper[4926]: I1122 10:40:02.035582 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:02 crc kubenswrapper[4926]: I1122 10:40:02.035593 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:02Z","lastTransitionTime":"2025-11-22T10:40:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:02 crc kubenswrapper[4926]: I1122 10:40:02.046742 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2bff8ee1644ff7cd9bee279e5c641b5d9c55e0546fbc7d1f2330e4e86aee0c37\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": 
Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 10:40:02 crc kubenswrapper[4926]: I1122 10:40:02.059105 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 10:40:02 crc kubenswrapper[4926]: I1122 10:40:02.072389 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 10:40:02 crc kubenswrapper[4926]: I1122 10:40:02.082694 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-4sppr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f66b576d-83a6-4919-a918-7b075f35881e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"message\\\":\\\"containers with unready status: 
[node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jwtrf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:01Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-4sppr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 10:40:02 crc kubenswrapper[4926]: I1122 10:40:02.095583 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 10:40:02 crc kubenswrapper[4926]: I1122 10:40:02.106834 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d4977b14-85c3-4141-9b15-1768f09e8d27\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7f4461b3d09c647d71476b2c089ab47e93fe0e0efb27f8179e476ff3667447f6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5nwn7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fb54559825bae40317f4dbc23c5b4238c4681f9845053656d59b2d2b3e504e1a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-confi
g-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5nwn7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:00Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-xr9nd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 10:40:02 crc kubenswrapper[4926]: I1122 10:40:02.118943 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-sr572" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fa95257d-7464-4038-b2f3-aa795e4ac425\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0a7d49a66e96b6f535cae7cd657acf64164c67b52df055b133183ef25644863d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"}
,{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:00Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-sr572\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 10:40:02 crc kubenswrapper[4926]: I1122 10:40:02.128676 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://20ee112074738980326d231b1680aa74c920ec797dbfe4752ebfdfb99695caeb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0a5e6c7d3f4dd4bbfdc58c50672c5187d32b136fb9e096c95e786193e1c773b9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 10:40:02 crc kubenswrapper[4926]: I1122 10:40:02.138688 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 22 10:40:02 crc kubenswrapper[4926]: I1122 10:40:02.138763 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 
10:40:02 crc kubenswrapper[4926]: I1122 10:40:02.138793 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:02 crc kubenswrapper[4926]: I1122 10:40:02.138799 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 10:40:02 crc kubenswrapper[4926]: I1122 10:40:02.138840 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/f66b576d-83a6-4919-a918-7b075f35881e-host\") pod \"node-ca-4sppr\" (UID: \"f66b576d-83a6-4919-a918-7b075f35881e\") " pod="openshift-image-registry/node-ca-4sppr" Nov 22 10:40:02 crc kubenswrapper[4926]: I1122 10:40:02.138806 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:02 crc kubenswrapper[4926]: E1122 10:40:02.138875 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 10:40:04.138852715 +0000 UTC m=+24.440458012 (durationBeforeRetry 2s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 10:40:02 crc kubenswrapper[4926]: I1122 10:40:02.138968 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:02 crc kubenswrapper[4926]: I1122 10:40:02.138980 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jwtrf\" (UniqueName: \"kubernetes.io/projected/f66b576d-83a6-4919-a918-7b075f35881e-kube-api-access-jwtrf\") pod \"node-ca-4sppr\" (UID: \"f66b576d-83a6-4919-a918-7b075f35881e\") " pod="openshift-image-registry/node-ca-4sppr" Nov 22 10:40:02 crc kubenswrapper[4926]: I1122 10:40:02.138985 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:02Z","lastTransitionTime":"2025-11-22T10:40:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:40:02 crc kubenswrapper[4926]: I1122 10:40:02.139029 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 10:40:02 crc kubenswrapper[4926]: E1122 10:40:02.139027 4926 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 22 10:40:02 crc kubenswrapper[4926]: I1122 10:40:02.139077 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/f66b576d-83a6-4919-a918-7b075f35881e-serviceca\") pod \"node-ca-4sppr\" (UID: \"f66b576d-83a6-4919-a918-7b075f35881e\") " pod="openshift-image-registry/node-ca-4sppr" Nov 22 10:40:02 crc kubenswrapper[4926]: E1122 10:40:02.139153 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-22 10:40:04.139124472 +0000 UTC m=+24.440729749 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 22 10:40:02 crc kubenswrapper[4926]: E1122 10:40:02.139183 4926 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 22 10:40:02 crc kubenswrapper[4926]: E1122 10:40:02.139223 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-22 10:40:04.139214784 +0000 UTC m=+24.440820071 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 22 10:40:02 crc kubenswrapper[4926]: I1122 10:40:02.181836 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-z69nr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"25bc94bb-a5d1-431c-9847-2f6a02997e25\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fde011e559f073f261a07a3e55c05d88d1b11a16b6858b74cb8ef79b97c266b1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fde011e559f073f261a07a3e55c05d88d1b11a16b6858b74cb8ef79b97c266b1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:00Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-z69nr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 10:40:02 crc kubenswrapper[4926]: I1122 10:40:02.240438 
4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 22 10:40:02 crc kubenswrapper[4926]: I1122 10:40:02.240487 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/f66b576d-83a6-4919-a918-7b075f35881e-host\") pod \"node-ca-4sppr\" (UID: \"f66b576d-83a6-4919-a918-7b075f35881e\") " pod="openshift-image-registry/node-ca-4sppr" Nov 22 10:40:02 crc kubenswrapper[4926]: I1122 10:40:02.240520 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jwtrf\" (UniqueName: \"kubernetes.io/projected/f66b576d-83a6-4919-a918-7b075f35881e-kube-api-access-jwtrf\") pod \"node-ca-4sppr\" (UID: \"f66b576d-83a6-4919-a918-7b075f35881e\") " pod="openshift-image-registry/node-ca-4sppr" Nov 22 10:40:02 crc kubenswrapper[4926]: I1122 10:40:02.240546 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/f66b576d-83a6-4919-a918-7b075f35881e-serviceca\") pod \"node-ca-4sppr\" (UID: \"f66b576d-83a6-4919-a918-7b075f35881e\") " pod="openshift-image-registry/node-ca-4sppr" Nov 22 10:40:02 crc kubenswrapper[4926]: I1122 10:40:02.240566 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 22 10:40:02 crc kubenswrapper[4926]: E1122 10:40:02.240619 4926 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 22 10:40:02 crc kubenswrapper[4926]: E1122 10:40:02.240645 4926 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 22 10:40:02 crc kubenswrapper[4926]: E1122 10:40:02.240656 4926 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 22 10:40:02 crc kubenswrapper[4926]: E1122 10:40:02.240695 4926 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 22 10:40:02 crc kubenswrapper[4926]: E1122 10:40:02.240704 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-22 10:40:04.240689199 +0000 UTC m=+24.542294486 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 22 10:40:02 crc kubenswrapper[4926]: E1122 10:40:02.240713 4926 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 22 10:40:02 crc kubenswrapper[4926]: E1122 10:40:02.240724 4926 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 22 10:40:02 crc kubenswrapper[4926]: E1122 10:40:02.240781 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-22 10:40:04.240768471 +0000 UTC m=+24.542373758 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 22 10:40:02 crc kubenswrapper[4926]: I1122 10:40:02.240951 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/f66b576d-83a6-4919-a918-7b075f35881e-host\") pod \"node-ca-4sppr\" (UID: \"f66b576d-83a6-4919-a918-7b075f35881e\") " pod="openshift-image-registry/node-ca-4sppr" Nov 22 10:40:02 crc kubenswrapper[4926]: I1122 10:40:02.241751 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:02 crc kubenswrapper[4926]: I1122 10:40:02.241774 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/f66b576d-83a6-4919-a918-7b075f35881e-serviceca\") pod \"node-ca-4sppr\" (UID: \"f66b576d-83a6-4919-a918-7b075f35881e\") " pod="openshift-image-registry/node-ca-4sppr" Nov 22 10:40:02 crc kubenswrapper[4926]: I1122 10:40:02.241782 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:02 crc kubenswrapper[4926]: I1122 10:40:02.241796 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:02 crc kubenswrapper[4926]: I1122 10:40:02.241811 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:02 crc kubenswrapper[4926]: I1122 10:40:02.241819 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:02Z","lastTransitionTime":"2025-11-22T10:40:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false 
reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 10:40:02 crc kubenswrapper[4926]: I1122 10:40:02.262483 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jwtrf\" (UniqueName: \"kubernetes.io/projected/f66b576d-83a6-4919-a918-7b075f35881e-kube-api-access-jwtrf\") pod \"node-ca-4sppr\" (UID: \"f66b576d-83a6-4919-a918-7b075f35881e\") " pod="openshift-image-registry/node-ca-4sppr"
Nov 22 10:40:02 crc kubenswrapper[4926]: I1122 10:40:02.334228 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/node-ca-4sppr"
Nov 22 10:40:02 crc kubenswrapper[4926]: I1122 10:40:02.346685 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 10:40:02 crc kubenswrapper[4926]: I1122 10:40:02.346756 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 10:40:02 crc kubenswrapper[4926]: I1122 10:40:02.346768 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 10:40:02 crc kubenswrapper[4926]: I1122 10:40:02.346786 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 10:40:02 crc kubenswrapper[4926]: I1122 10:40:02.346798 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:02Z","lastTransitionTime":"2025-11-22T10:40:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 10:40:02 crc kubenswrapper[4926]: W1122 10:40:02.349217 4926 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf66b576d_83a6_4919_a918_7b075f35881e.slice/crio-e70d793ae95c2f1276aa8fc69d8e6b64be8f26ce298206479aa0e3a0bee24cdd WatchSource:0}: Error finding container e70d793ae95c2f1276aa8fc69d8e6b64be8f26ce298206479aa0e3a0bee24cdd: Status 404 returned error can't find the container with id e70d793ae95c2f1276aa8fc69d8e6b64be8f26ce298206479aa0e3a0bee24cdd
Nov 22 10:40:02 crc kubenswrapper[4926]: I1122 10:40:02.448960 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 10:40:02 crc kubenswrapper[4926]: I1122 10:40:02.449000 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 10:40:02 crc kubenswrapper[4926]: I1122 10:40:02.449011 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 10:40:02 crc kubenswrapper[4926]: I1122 10:40:02.449027 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 10:40:02 crc kubenswrapper[4926]: I1122 10:40:02.449038 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:02Z","lastTransitionTime":"2025-11-22T10:40:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 10:40:02 crc kubenswrapper[4926]: I1122 10:40:02.551928 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 10:40:02 crc kubenswrapper[4926]: I1122 10:40:02.551958 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 10:40:02 crc kubenswrapper[4926]: I1122 10:40:02.551965 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 10:40:02 crc kubenswrapper[4926]: I1122 10:40:02.551980 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 10:40:02 crc kubenswrapper[4926]: I1122 10:40:02.551992 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:02Z","lastTransitionTime":"2025-11-22T10:40:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 10:40:02 crc kubenswrapper[4926]: I1122 10:40:02.581634 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 22 10:40:02 crc kubenswrapper[4926]: E1122 10:40:02.581750 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 22 10:40:02 crc kubenswrapper[4926]: I1122 10:40:02.582136 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 22 10:40:02 crc kubenswrapper[4926]: E1122 10:40:02.582192 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 22 10:40:02 crc kubenswrapper[4926]: I1122 10:40:02.585721 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="01ab3dd5-8196-46d0-ad33-122e2ca51def" path="/var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes"
Nov 22 10:40:02 crc kubenswrapper[4926]: I1122 10:40:02.586670 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" path="/var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes"
Nov 22 10:40:02 crc kubenswrapper[4926]: I1122 10:40:02.587605 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="09efc573-dbb6-4249-bd59-9b87aba8dd28" path="/var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes"
Nov 22 10:40:02 crc kubenswrapper[4926]: I1122 10:40:02.588408 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0b574797-001e-440a-8f4e-c0be86edad0f" path="/var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes"
Nov 22 10:40:02 crc kubenswrapper[4926]: I1122 10:40:02.590171 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0b78653f-4ff9-4508-8672-245ed9b561e3" path="/var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes"
Nov 22 10:40:02 crc kubenswrapper[4926]: I1122 10:40:02.590988 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1386a44e-36a2-460c-96d0-0359d2b6f0f5" path="/var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes"
Nov 22 10:40:02 crc kubenswrapper[4926]: I1122 10:40:02.591768 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1bf7eb37-55a3-4c65-b768-a94c82151e69" path="/var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes"
Nov 22 10:40:02 crc kubenswrapper[4926]: I1122 10:40:02.593142 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1d611f23-29be-4491-8495-bee1670e935f" path="/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes"
Nov 22 10:40:02 crc kubenswrapper[4926]: I1122 10:40:02.594021 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="20b0d48f-5fd6-431c-a545-e3c800c7b866" path="/var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/volumes"
Nov 22 10:40:02 crc kubenswrapper[4926]: I1122 10:40:02.595442 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" path="/var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes"
Nov 22 10:40:02 crc kubenswrapper[4926]: I1122 10:40:02.596800 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="22c825df-677d-4ca6-82db-3454ed06e783" path="/var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes"
Nov 22 10:40:02 crc kubenswrapper[4926]: I1122 10:40:02.598518 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="25e176fe-21b4-4974-b1ed-c8b94f112a7f" path="/var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes"
Nov 22 10:40:02 crc kubenswrapper[4926]: I1122 10:40:02.599517 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" path="/var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes"
Nov 22 10:40:02 crc kubenswrapper[4926]: I1122 10:40:02.600622 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="31d8b7a1-420e-4252-a5b7-eebe8a111292" path="/var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes"
Nov 22 10:40:02 crc kubenswrapper[4926]: I1122 10:40:02.601705 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3ab1a177-2de0-46d9-b765-d0d0649bb42e" path="/var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/volumes"
Nov 22 10:40:02 crc kubenswrapper[4926]: I1122 10:40:02.603102 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" path="/var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes"
Nov 22 10:40:02 crc kubenswrapper[4926]: I1122 10:40:02.603854 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="43509403-f426-496e-be36-56cef71462f5" path="/var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes"
Nov 22 10:40:02 crc kubenswrapper[4926]: I1122 10:40:02.605704 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" path="/var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes"
Nov 22 10:40:02 crc kubenswrapper[4926]: I1122 10:40:02.606574 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4bb40260-dbaa-4fb0-84df-5e680505d512" path="/var/lib/kubelet/pods/4bb40260-dbaa-4fb0-84df-5e680505d512/volumes"
Nov 22 10:40:02 crc kubenswrapper[4926]: I1122 10:40:02.607832 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5225d0e4-402f-4861-b410-819f433b1803" path="/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes"
Nov 22 10:40:02 crc kubenswrapper[4926]: I1122 10:40:02.608847 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5441d097-087c-4d9a-baa8-b210afa90fc9" path="/var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes"
Nov 22 10:40:02 crc kubenswrapper[4926]: I1122 10:40:02.609831 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="57a731c4-ef35-47a8-b875-bfb08a7f8011" path="/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes"
Nov 22 10:40:02 crc kubenswrapper[4926]: I1122 10:40:02.611429 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5fe579f8-e8a6-4643-bce5-a661393c4dde" path="/var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/volumes"
Nov 22 10:40:02 crc kubenswrapper[4926]: I1122 10:40:02.654778 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 10:40:02 crc kubenswrapper[4926]: I1122 10:40:02.654829 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 10:40:02 crc kubenswrapper[4926]: I1122 10:40:02.654841 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 10:40:02 crc kubenswrapper[4926]: I1122 10:40:02.654854 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 10:40:02 crc kubenswrapper[4926]: I1122 10:40:02.654864 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:02Z","lastTransitionTime":"2025-11-22T10:40:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 10:40:02 crc kubenswrapper[4926]: I1122 10:40:02.728796 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/node-ca-4sppr" event={"ID":"f66b576d-83a6-4919-a918-7b075f35881e","Type":"ContainerStarted","Data":"e70d793ae95c2f1276aa8fc69d8e6b64be8f26ce298206479aa0e3a0bee24cdd"}
Nov 22 10:40:02 crc kubenswrapper[4926]: I1122 10:40:02.732824 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-z69nr" event={"ID":"25bc94bb-a5d1-431c-9847-2f6a02997e25","Type":"ContainerStarted","Data":"ecaafcef7f92f85be04341712725da108f6dc00c79d4fb72f683186ccaa7c7d7"}
Nov 22 10:40:02 crc kubenswrapper[4926]: I1122 10:40:02.732857 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-z69nr" event={"ID":"25bc94bb-a5d1-431c-9847-2f6a02997e25","Type":"ContainerStarted","Data":"f2e1c170bdf63c83df5049fbe46ef4de6e08838e961ed3c16904cb9572fdf52f"}
Nov 22 10:40:02 crc kubenswrapper[4926]: I1122 10:40:02.732869 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-z69nr" event={"ID":"25bc94bb-a5d1-431c-9847-2f6a02997e25","Type":"ContainerStarted","Data":"877601ea3a274df873ed3165ae9f9e407093527a55ca10bb9a1c6092312c1b58"}
Nov 22 10:40:02 crc kubenswrapper[4926]: I1122 10:40:02.732880 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-z69nr" event={"ID":"25bc94bb-a5d1-431c-9847-2f6a02997e25","Type":"ContainerStarted","Data":"20393151c13ddc03e23c32092c70e09c99527d0dd7554c02fc9d2ef8f6e38ba4"}
Nov 22 10:40:02 crc kubenswrapper[4926]: I1122 10:40:02.732908 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-z69nr" event={"ID":"25bc94bb-a5d1-431c-9847-2f6a02997e25","Type":"ContainerStarted","Data":"4dd9889bb8600e958757f70ba92e2e7742f9f4e8ce9bf9c11dc4a2fd4ba0aaa2"}
Nov 22 10:40:02 crc kubenswrapper[4926]: I1122 10:40:02.732917 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-z69nr" event={"ID":"25bc94bb-a5d1-431c-9847-2f6a02997e25","Type":"ContainerStarted","Data":"7f647a7091a4d0acbf116e2583851b13ec4415004447202e892f2b24b7acf5e9"}
Nov 22 10:40:02 crc kubenswrapper[4926]: I1122 10:40:02.734575 4926 generic.go:334] "Generic (PLEG): container finished" podID="fa95257d-7464-4038-b2f3-aa795e4ac425" containerID="0a7d49a66e96b6f535cae7cd657acf64164c67b52df055b133183ef25644863d" exitCode=0
Nov 22 10:40:02 crc kubenswrapper[4926]: I1122 10:40:02.734640 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-sr572" event={"ID":"fa95257d-7464-4038-b2f3-aa795e4ac425","Type":"ContainerDied","Data":"0a7d49a66e96b6f535cae7cd657acf64164c67b52df055b133183ef25644863d"}
Nov 22 10:40:02 crc
kubenswrapper[4926]: I1122 10:40:02.736564 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/1.log" Nov 22 10:40:02 crc kubenswrapper[4926]: I1122 10:40:02.737088 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/0.log" Nov 22 10:40:02 crc kubenswrapper[4926]: I1122 10:40:02.739250 4926 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="e4845e8b99f2def354e4a055d407495a578db4994aef6fb1c70b91760bbf4e2a" exitCode=255 Nov 22 10:40:02 crc kubenswrapper[4926]: I1122 10:40:02.739283 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerDied","Data":"e4845e8b99f2def354e4a055d407495a578db4994aef6fb1c70b91760bbf4e2a"} Nov 22 10:40:02 crc kubenswrapper[4926]: I1122 10:40:02.739311 4926 scope.go:117] "RemoveContainer" containerID="a72c3a21a999d8cd54626c67ce5ee39d420aea38febb618c2cd1dd4aa3a63624" Nov 22 10:40:02 crc kubenswrapper[4926]: I1122 10:40:02.756442 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:02Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:02 crc kubenswrapper[4926]: I1122 10:40:02.765138 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:02 crc kubenswrapper[4926]: I1122 10:40:02.765174 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:02 crc kubenswrapper[4926]: I1122 10:40:02.765185 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:02 crc kubenswrapper[4926]: I1122 10:40:02.765203 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:02 crc kubenswrapper[4926]: I1122 10:40:02.765214 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:02Z","lastTransitionTime":"2025-11-22T10:40:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:40:02 crc kubenswrapper[4926]: I1122 10:40:02.778493 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d4977b14-85c3-4141-9b15-1768f09e8d27\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7f4461b3d09c647d71476b2c089ab47e93fe0e0efb27f8179e476ff3667447f6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5nwn7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fb54559825bae40317f4dbc23c5b4238c4681f9845053656d59b2d2b3e504e1a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5nwn7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:00Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-xr9nd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:02Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:02 crc kubenswrapper[4926]: I1122 10:40:02.798371 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-sr572" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fa95257d-7464-4038-b2f3-aa795e4ac425\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with incomplete status: [cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0a7d49a66e96b6f535cae7cd657acf64164c67b52df055b133183ef25644863d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0a7d49a66e96b6f535cae7cd657acf64164c67b52df055b133183ef25644863d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/hos
t/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\
\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:00Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-sr572\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:02Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:02 crc kubenswrapper[4926]: I1122 10:40:02.829536 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/kube-apiserver-crc"] Nov 22 10:40:02 crc kubenswrapper[4926]: I1122 10:40:02.831080 4926 scope.go:117] "RemoveContainer" containerID="e4845e8b99f2def354e4a055d407495a578db4994aef6fb1c70b91760bbf4e2a" Nov 22 10:40:02 crc kubenswrapper[4926]: E1122 10:40:02.831289 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver-check-endpoints\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\"" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" Nov 22 10:40:02 crc kubenswrapper[4926]: I1122 10:40:02.833127 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://20ee112074738980326d231b1680aa74c920ec797dbfe4752ebfdfb99695caeb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0a5e6c7d3f4dd4bbfdc58c50672c5187d32b136fb9e096c95e786193e1c773b9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:02Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:02 crc kubenswrapper[4926]: I1122 10:40:02.858928 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-z69nr" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"25bc94bb-a5d1-431c-9847-2f6a02997e25\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-op
envswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{
},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fde011e559f073f261a07a3e55c05d88d1b11a16b6858b74cb8ef79b97c266b1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36
cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fde011e559f073f261a07a3e55c05d88d1b11a16b6858b74cb8ef79b97c266b1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:00Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-z69nr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:02Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:02 crc kubenswrapper[4926]: I1122 10:40:02.867467 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:02 crc kubenswrapper[4926]: I1122 10:40:02.867501 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:02 crc kubenswrapper[4926]: I1122 10:40:02.867512 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:02 crc kubenswrapper[4926]: I1122 10:40:02.867524 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:02 crc kubenswrapper[4926]: I1122 10:40:02.867532 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:02Z","lastTransitionTime":"2025-11-22T10:40:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:40:02 crc kubenswrapper[4926]: I1122 10:40:02.870729 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-c6w2q" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"36de2843-6491-4c54-b624-c4a3d328c164\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://13dff3bd18b5adc45fb92f8207a6726011752f430271dabbae9f2e94011f1b08\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-52vzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126
.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:00Z\\\"}}\" for pod \"openshift-multus\"/\"multus-c6w2q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:02Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:02 crc kubenswrapper[4926]: I1122 10:40:02.881852 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:02Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:02 crc kubenswrapper[4926]: I1122 10:40:02.891631 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-wqf9b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"385427b5-fb45-42dd-8ff8-a4c7cdad6157\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://06207f52a9095cf1830a6ad91952c7efa774aa15a2fca873dc09b6b995e26401\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4rrk8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:00Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-wqf9b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call 
webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:02Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:02 crc kubenswrapper[4926]: I1122 10:40:02.902783 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2bff8ee1644ff7cd9bee279e5c641b5d9c55e0546fbc7d1f2330e4e86aee0c37\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:02Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:02 crc kubenswrapper[4926]: I1122 10:40:02.913542 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:02Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:02 crc kubenswrapper[4926]: I1122 10:40:02.923969 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:02Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:02 crc kubenswrapper[4926]: I1122 10:40:02.932937 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-4sppr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f66b576d-83a6-4919-a918-7b075f35881e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jwtrf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:01Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-4sppr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired 
or is not yet valid: current time 2025-11-22T10:40:02Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:02 crc kubenswrapper[4926]: I1122 10:40:02.944385 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://20ee112074738980326d231b1680aa74c920ec797dbfe4752ebfdfb99695caeb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0a5e6c7d3f4dd4bbfdc58c50672c5187d32b136fb9e096c95e786193e1c773b9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:02Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:02 crc kubenswrapper[4926]: I1122 10:40:02.964305 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-z69nr" 
err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"25bc94bb-a5d1-431c-9847-2f6a02997e25\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/
\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fde011e559f073f261a07a3e55c05d88d1b11a16b6858b74cb8ef79b97c266b1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/oc
p-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fde011e559f073f261a07a3e55c05d88d1b11a16b6858b74cb8ef79b97c266b1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:00Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-z69nr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:02Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:02 crc kubenswrapper[4926]: I1122 10:40:02.969536 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:02 crc kubenswrapper[4926]: I1122 10:40:02.969584 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:02 crc kubenswrapper[4926]: I1122 10:40:02.969595 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:02 crc kubenswrapper[4926]: I1122 10:40:02.969615 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:02 crc kubenswrapper[4926]: I1122 10:40:02.969631 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:02Z","lastTransitionTime":"2025-11-22T10:40:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:40:02 crc kubenswrapper[4926]: I1122 10:40:02.977510 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-c6w2q" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"36de2843-6491-4c54-b624-c4a3d328c164\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://13dff3bd18b5adc45fb92f8207a6726011752f430271dabbae9f2e94011f1b08\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-52vzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126
.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:00Z\\\"}}\" for pod \"openshift-multus\"/\"multus-c6w2q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:02Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:02 crc kubenswrapper[4926]: I1122 10:40:02.988729 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:02Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:03 crc kubenswrapper[4926]: I1122 10:40:03.007545 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-wqf9b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"385427b5-fb45-42dd-8ff8-a4c7cdad6157\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://06207f52a9095cf1830a6ad91952c7efa774aa15a2fca873dc09b6b995e26401\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4rrk8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:00Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-wqf9b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call 
webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:03Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:03 crc kubenswrapper[4926]: I1122 10:40:03.022007 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2bff8ee1644ff7cd9bee279e5c641b5d9c55e0546fbc7d1f2330e4e86aee0c37\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:03Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:03 crc kubenswrapper[4926]: I1122 10:40:03.039993 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:03Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:03 crc kubenswrapper[4926]: I1122 10:40:03.057327 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:03Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:03 crc kubenswrapper[4926]: I1122 10:40:03.068342 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-4sppr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f66b576d-83a6-4919-a918-7b075f35881e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jwtrf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:01Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-4sppr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired 
or is not yet valid: current time 2025-11-22T10:40:03Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:03 crc kubenswrapper[4926]: I1122 10:40:03.071763 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:03 crc kubenswrapper[4926]: I1122 10:40:03.071794 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:03 crc kubenswrapper[4926]: I1122 10:40:03.071804 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:03 crc kubenswrapper[4926]: I1122 10:40:03.071819 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:03 crc kubenswrapper[4926]: I1122 10:40:03.071829 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:03Z","lastTransitionTime":"2025-11-22T10:40:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:03 crc kubenswrapper[4926]: I1122 10:40:03.087160 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:03Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:03 crc kubenswrapper[4926]: I1122 10:40:03.130455 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d4977b14-85c3-4141-9b15-1768f09e8d27\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7f4461b3d09c647d71476b2c089ab47e93fe0e0efb27f8179e476ff3667447f6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5nwn7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fb54559825bae40317f4dbc23c5b4238c4681f9845053656d59b2d2b3e504e1a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae3
4a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5nwn7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:00Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-xr9nd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:03Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:03 crc kubenswrapper[4926]: I1122 10:40:03.173271 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-sr572" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fa95257d-7464-4038-b2f3-aa795e4ac425\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with incomplete status: [cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0a7d49a66e96b6f535cae7cd657acf64164c67b52df055b133183ef25644863d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0a7d49a66e96b6f535cae7cd657acf64164c67b52df055b133183ef25644863d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reaso
n\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:00Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-sr572\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:03Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:03 crc 
kubenswrapper[4926]: I1122 10:40:03.174654 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:03 crc kubenswrapper[4926]: I1122 10:40:03.174683 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:03 crc kubenswrapper[4926]: I1122 10:40:03.174692 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:03 crc kubenswrapper[4926]: I1122 10:40:03.174712 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:03 crc kubenswrapper[4926]: I1122 10:40:03.174726 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:03Z","lastTransitionTime":"2025-11-22T10:40:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:03 crc kubenswrapper[4926]: I1122 10:40:03.210807 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8bc26a84-5939-4708-97f7-36fb5b1d1f27\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:40Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:40Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bdc617bc3d316106b3e66df8521ae9165a90df081b92c4a6c167895006a667a4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8bdb9d01c359b36d5ea1517197ab1db0746267b371b5aaf6ae9af7157af6f76\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f92968d6a164ebcee515ead5d8704ab04430b65f29cec21c10cecf2b99079737\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e4845e8b99f2def354e4a055d407495a578db4994aef6fb1c70b91760bbf4e2a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a72c3a21a999d8cd54626c67ce5ee39d420aea38febb618c2cd1dd4aa3a63624\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-22T10:39:54Z\\\",\\\"message\\\":\\\"W1122 10:39:43.887020 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1122 
10:39:43.887475 1 crypto.go:601] Generating new CA for check-endpoints-signer@1763807983 cert, and key in /tmp/serving-cert-2696864206/serving-signer.crt, /tmp/serving-cert-2696864206/serving-signer.key\\\\nI1122 10:39:44.483557 1 observer_polling.go:159] Starting file observer\\\\nW1122 10:39:44.486066 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1122 10:39:44.486279 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1122 10:39:44.487331 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-2696864206/tls.crt::/tmp/serving-cert-2696864206/tls.key\\\\\\\"\\\\nF1122 10:39:54.824843 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": net/http: TLS handshake timeout\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T10:39:43Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e4845e8b99f2def354e4a055d407495a578db4994aef6fb1c70b91760bbf4e2a\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"message\\\":\\\"le observer\\\\nW1122 10:40:01.221686 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1122 10:40:01.221835 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1122 10:40:01.223195 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3742639324/tls.crt::/tmp/serving-cert-3742639324/tls.key\\\\\\\"\\\\nI1122 10:40:01.845350 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1122 10:40:01.848213 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1122 10:40:01.848231 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1122 10:40:01.848251 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1122 10:40:01.848258 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1122 10:40:01.855383 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1122 10:40:01.855412 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1122 10:40:01.855418 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1122 10:40:01.855423 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nI1122 10:40:01.855441 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1122 10:40:01.855456 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1122 10:40:01.855461 1 secure_serving.go:69] Use of insecure cipher 
'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1122 10:40:01.855464 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1122 10:40:01.857125 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T10:39:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://59001d5969924d417da69f7b385e8ee904bb5289914c6f4a58156a80f2553abd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:43Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e4fdad471fe7d7332fa8c1fc1e4779b934b140f9c808e0af44f05808c56b3e42\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e4fdad471fe7d7332fa8c1fc1e4779b934b140f9c808e0af44f05808c56b3e42\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:39:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:39:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:39:40Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:03Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:03 crc kubenswrapper[4926]: I1122 10:40:03.277065 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:03 crc kubenswrapper[4926]: I1122 10:40:03.277107 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:03 crc kubenswrapper[4926]: I1122 10:40:03.277119 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:03 crc kubenswrapper[4926]: I1122 10:40:03.277133 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:03 crc kubenswrapper[4926]: I1122 10:40:03.277144 4926 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:03Z","lastTransitionTime":"2025-11-22T10:40:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:03 crc kubenswrapper[4926]: I1122 10:40:03.318290 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-etcd/etcd-crc" Nov 22 10:40:03 crc kubenswrapper[4926]: I1122 10:40:03.333527 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-etcd/etcd-crc" Nov 22 10:40:03 crc kubenswrapper[4926]: I1122 10:40:03.336088 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-etcd/etcd-crc"] Nov 22 10:40:03 crc kubenswrapper[4926]: I1122 10:40:03.337864 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:03Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:03 crc kubenswrapper[4926]: I1122 10:40:03.349546 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-c6w2q" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"36de2843-6491-4c54-b624-c4a3d328c164\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://13dff3bd18b5adc45fb92f8207a6726011752f430271dabbae9f2e94011f1b08\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/v
ar/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-52vzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:00Z\\\"}}\" for pod \"openshift-multus\"/\"multus-c6w2q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:03Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:03 crc kubenswrapper[4926]: I1122 10:40:03.357810 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-4sppr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f66b576d-83a6-4919-a918-7b075f35881e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"message\\\":\\\"containers with unready status: 
[node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jwtrf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:01Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-4sppr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:03Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:03 crc kubenswrapper[4926]: I1122 10:40:03.379392 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:03 crc kubenswrapper[4926]: I1122 10:40:03.379432 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:03 crc kubenswrapper[4926]: I1122 10:40:03.379440 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:03 crc kubenswrapper[4926]: I1122 10:40:03.379470 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:03 crc kubenswrapper[4926]: I1122 10:40:03.379479 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:03Z","lastTransitionTime":"2025-11-22T10:40:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:40:03 crc kubenswrapper[4926]: I1122 10:40:03.385363 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-wqf9b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"385427b5-fb45-42dd-8ff8-a4c7cdad6157\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://06207f52a9095cf1830a6ad91952c7efa774aa15a2fca873dc09b6b995e26401\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4rrk8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:00Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-wqf9b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:03Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:03 crc kubenswrapper[4926]: I1122 10:40:03.439814 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2bff8ee1644ff7cd9bee279e5c641b5d9c55e0546fbc7d1f2330e4e86aee0c37\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:03Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:03 crc kubenswrapper[4926]: I1122 10:40:03.479358 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was 
deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:03Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:03 crc kubenswrapper[4926]: I1122 10:40:03.481959 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:03 crc kubenswrapper[4926]: I1122 10:40:03.482006 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:03 crc kubenswrapper[4926]: I1122 10:40:03.482018 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:03 crc kubenswrapper[4926]: I1122 10:40:03.482037 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:03 crc kubenswrapper[4926]: I1122 10:40:03.482048 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:03Z","lastTransitionTime":"2025-11-22T10:40:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:40:03 crc kubenswrapper[4926]: I1122 10:40:03.512522 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:03Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:03 crc kubenswrapper[4926]: I1122 10:40:03.581105 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 22 10:40:03 crc kubenswrapper[4926]: E1122 10:40:03.581212 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 22 10:40:03 crc kubenswrapper[4926]: I1122 10:40:03.584624 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:03 crc kubenswrapper[4926]: I1122 10:40:03.584670 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:03 crc kubenswrapper[4926]: I1122 10:40:03.584681 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:03 crc kubenswrapper[4926]: I1122 10:40:03.584698 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:03 crc kubenswrapper[4926]: I1122 10:40:03.584709 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:03Z","lastTransitionTime":"2025-11-22T10:40:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:03 crc kubenswrapper[4926]: I1122 10:40:03.588120 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:03Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:03 crc kubenswrapper[4926]: I1122 10:40:03.627236 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d4977b14-85c3-4141-9b15-1768f09e8d27\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7f4461b3d09c647d71476b2c089ab47e93fe0e0efb27f8179e476ff3667447f6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5nwn7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fb54559825bae40317f4dbc23c5b4238c4681f9845053656d59b2d2b3e504e1a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae3
4a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5nwn7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:00Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-xr9nd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:03Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:03 crc 
kubenswrapper[4926]: I1122 10:40:03.686765 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:03 crc kubenswrapper[4926]: I1122 10:40:03.686804 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:03 crc kubenswrapper[4926]: I1122 10:40:03.686815 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:03 crc kubenswrapper[4926]: I1122 10:40:03.686834 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:03 crc kubenswrapper[4926]: I1122 10:40:03.686846 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:03Z","lastTransitionTime":"2025-11-22T10:40:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:03 crc kubenswrapper[4926]: I1122 10:40:03.707863 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://20ee112074738980326d231b1680aa74c920ec797dbfe4752ebfdfb99695caeb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0a5e6c7d3f4dd4bbfdc58c50672c5187d32b136fb9e096c95e786193e1c773b9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\
\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:03Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:03 crc kubenswrapper[4926]: I1122 10:40:03.743589 4926 generic.go:334] "Generic (PLEG): container finished" podID="fa95257d-7464-4038-b2f3-aa795e4ac425" containerID="53833aaa04a836cea1e6539fd089b91d92bec1e9c1037d036a09fa83a95b45da" exitCode=0 Nov 22 10:40:03 crc kubenswrapper[4926]: I1122 10:40:03.743656 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-sr572" event={"ID":"fa95257d-7464-4038-b2f3-aa795e4ac425","Type":"ContainerDied","Data":"53833aaa04a836cea1e6539fd089b91d92bec1e9c1037d036a09fa83a95b45da"} Nov 22 10:40:03 crc kubenswrapper[4926]: I1122 10:40:03.744638 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/node-ca-4sppr" event={"ID":"f66b576d-83a6-4919-a918-7b075f35881e","Type":"ContainerStarted","Data":"a4a164ad2484a9c0b09c8e6b174448dedfe84a017ada69b19429d90621540080"} Nov 22 10:40:03 crc kubenswrapper[4926]: I1122 10:40:03.747100 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" event={"ID":"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49","Type":"ContainerStarted","Data":"136423f9eeb872be0b685dcfee2a81e5f4133ad09607dc53fdfa64d70e87ceab"} Nov 22 10:40:03 crc kubenswrapper[4926]: I1122 10:40:03.749979 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/1.log" Nov 22 10:40:03 crc kubenswrapper[4926]: I1122 10:40:03.754344 4926 scope.go:117] "RemoveContainer" containerID="e4845e8b99f2def354e4a055d407495a578db4994aef6fb1c70b91760bbf4e2a" Nov 22 10:40:03 crc kubenswrapper[4926]: E1122 10:40:03.754542 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver-check-endpoints\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\"" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" Nov 22 10:40:03 crc kubenswrapper[4926]: I1122 10:40:03.764414 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-z69nr" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"25bc94bb-a5d1-431c-9847-2f6a02997e25\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-op
envswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{
},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fde011e559f073f261a07a3e55c05d88d1b11a16b6858b74cb8ef79b97c266b1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36
cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fde011e559f073f261a07a3e55c05d88d1b11a16b6858b74cb8ef79b97c266b1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:00Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-z69nr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:03Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:03 crc kubenswrapper[4926]: I1122 10:40:03.787668 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d4977b14-85c3-4141-9b15-1768f09e8d27\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7f4461b3d09c647d71476b2c089ab47e93fe0e0efb27f8179e476ff3667447f6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5nwn7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fb54559825bae40317f4dbc23c5b4238c4681f9845053656d59b2d2b3e504e1a\\\",\\\"image\\\":\\\"quay.io/openshift-r
elease-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5nwn7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:00Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-xr9nd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:03Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:03 crc kubenswrapper[4926]: I1122 10:40:03.789327 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:03 crc kubenswrapper[4926]: I1122 10:40:03.789359 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:03 crc kubenswrapper[4926]: I1122 10:40:03.789369 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:03 crc kubenswrapper[4926]: I1122 10:40:03.789385 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:03 crc kubenswrapper[4926]: I1122 10:40:03.789394 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:03Z","lastTransitionTime":"2025-11-22T10:40:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:40:03 crc kubenswrapper[4926]: I1122 10:40:03.830120 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-sr572" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fa95257d-7464-4038-b2f3-aa795e4ac425\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with incomplete status: [bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0a7d49a66e96b6f535cae7cd657acf64164c67b52df055b133183ef25644863d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0a7d49a66e96b6f535cae7cd657acf64164c67b52df055b133183ef25644863d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.
io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://53833aaa04a836cea1e6539fd089b91d92bec1e9c1037d036a09fa83a95b45da\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://53833aaa04a836cea1e6539fd089b91d92bec1e9c1037d036a09fa83a95b45da\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\
\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:00Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-sr572\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:03Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:03 crc kubenswrapper[4926]: I1122 10:40:03.869521 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8bc26a84-5939-4708-97f7-36fb5b1d1f27\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:40Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:40Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bdc617bc3d316106b3e66df8521ae9165a90df081b92c4a6c167895006a667a4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8bdb9d01c359b36d5ea1517197ab1db0746267b371b5aaf6ae9af7157af6f76\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f92968d6a164ebcee515ead5d8704ab04430b65f29cec21c10cecf2b99079737\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e4845e8b99f2def354e4a055d407495a578db4994aef6fb1c70b91760bbf4e2a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e4845e8b99f2def354e4a055d407495a578db4994aef6fb1c70b91760bbf4e2a\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"message\\\":\\\"le observer\\\\nW1122 10:40:01.221686 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1122 10:40:01.221835 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1122 10:40:01.223195 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3742639324/tls.crt::/tmp/serving-cert-3742639324/tls.key\\\\\\\"\\\\nI1122 10:40:01.845350 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1122 10:40:01.848213 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1122 10:40:01.848231 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1122 10:40:01.848251 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1122 10:40:01.848258 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1122 10:40:01.855383 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1122 10:40:01.855412 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1122 10:40:01.855418 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1122 10:40:01.855423 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nI1122 10:40:01.855441 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1122 10:40:01.855456 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1122 10:40:01.855461 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1122 10:40:01.855464 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1122 10:40:01.857125 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T10:39:55Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://59001d5969924d417da69f7b385e8ee904bb5289914c6f4a58156a80f2553abd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:43Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e4fdad471fe7d7332fa8c1fc1e4779b934b140f9c808e0af44f05808c56b3e42\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e4fdad471fe7d7332fa8c1fc1e4779b934b140f9c808e0af44f05808c56b3e42\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:39:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:39:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:39:40Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:03Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:03 crc kubenswrapper[4926]: I1122 10:40:03.892743 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:03 crc kubenswrapper[4926]: I1122 10:40:03.892796 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:03 crc kubenswrapper[4926]: I1122 10:40:03.892808 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:03 crc kubenswrapper[4926]: I1122 10:40:03.892825 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:03 crc kubenswrapper[4926]: I1122 10:40:03.892842 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:03Z","lastTransitionTime":"2025-11-22T10:40:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:03 crc kubenswrapper[4926]: I1122 10:40:03.907031 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:03Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:03 crc kubenswrapper[4926]: I1122 10:40:03.955021 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-z69nr" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"25bc94bb-a5d1-431c-9847-2f6a02997e25\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-op
envswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{
},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fde011e559f073f261a07a3e55c05d88d1b11a16b6858b74cb8ef79b97c266b1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36
cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fde011e559f073f261a07a3e55c05d88d1b11a16b6858b74cb8ef79b97c266b1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:00Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-z69nr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:03Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:03 crc kubenswrapper[4926]: I1122 10:40:03.995527 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:03 crc kubenswrapper[4926]: I1122 10:40:03.995567 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:03 crc kubenswrapper[4926]: I1122 10:40:03.995576 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:03 crc kubenswrapper[4926]: I1122 10:40:03.995592 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:03 crc kubenswrapper[4926]: I1122 10:40:03.995602 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:03Z","lastTransitionTime":"2025-11-22T10:40:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:40:03 crc kubenswrapper[4926]: I1122 10:40:03.996697 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1f3bdd4c-e5c5-46f1-8f92-450d892ef2e2\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b68f6c08d1ac65e51054d798dcb1e6affbf5f114ea3848b544d3093168e44150\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://adceb29cfdba4f70773eb38f81b2291080a8a25e6d6ffe7fbbb037118e0ac29f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://29acb771444281287208f7c5aecfe78ed6e70fe2940be31d423c011878bee6c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\
":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c37c77ac89f0469ff68b3d5dffc2af7d7b6997b8975abe244e302baf0f5bbacc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ed65ad0463b5aa72d13756891af20691bff705a94c67a55d45f71af2a88b01e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2f4c96d7281eda14ad3971ee27f9f0f401d12ba8b72e32b65bd9d4935182f980\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2f4c96d7281eda14ad3971ee27f9f0f401d12ba8b72e32b65bd9d4935182f980\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:39:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:39:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ebad6d17a869f5f58a7c43300ceace96025b58ca9621cb6bc23b45d69fe471b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ebad6d17a869f5f58a7c43300ceace96025b58ca9621cb6bc23b45d69fe471b4\\\",\\\"exitCode\\\":0,\\\"finished
At\\\":\\\"2025-11-22T10:39:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:39:42Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://6075bb148352742b787b1e8df58fe76d06db89303a7c297d07446148532e92cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6075bb148352742b787b1e8df58fe76d06db89303a7c297d07446148532e92cb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:39:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:39:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:39:40Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:03Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:04 crc kubenswrapper[4926]: I1122 10:40:04.029142 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://20ee112074738980326d231b1680aa74c920ec797dbfe4752ebfdfb99695caeb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0a5e6c7d3f4dd4bbfdc58c50672c5187d32b136fb9e096c95e786193e1c773b9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:04Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:04 crc kubenswrapper[4926]: I1122 10:40:04.070203 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://136423f9eeb872be0b685dcfee2a81e5f4133ad09607dc53fdfa64d70e87ceab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:04Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:04 crc kubenswrapper[4926]: I1122 10:40:04.098147 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:04 crc kubenswrapper[4926]: I1122 10:40:04.098218 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:04 crc kubenswrapper[4926]: I1122 10:40:04.098232 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:04 crc kubenswrapper[4926]: I1122 10:40:04.098256 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:04 crc kubenswrapper[4926]: I1122 10:40:04.098279 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:04Z","lastTransitionTime":"2025-11-22T10:40:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:40:04 crc kubenswrapper[4926]: I1122 10:40:04.111644 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-c6w2q" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"36de2843-6491-4c54-b624-c4a3d328c164\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://13dff3bd18b5adc45fb92f8207a6726011752f430271dabbae9f2e94011f1b08\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-52vzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126
.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:00Z\\\"}}\" for pod \"openshift-multus\"/\"multus-c6w2q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:04Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:04 crc kubenswrapper[4926]: I1122 10:40:04.147746 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:04Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:04 crc kubenswrapper[4926]: I1122 10:40:04.158520 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 22 10:40:04 crc kubenswrapper[4926]: I1122 10:40:04.158685 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 10:40:04 crc kubenswrapper[4926]: I1122 10:40:04.158756 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 10:40:04 crc kubenswrapper[4926]: E1122 10:40:04.158772 4926 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 22 10:40:04 crc kubenswrapper[4926]: E1122 10:40:04.158835 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 10:40:08.158809983 +0000 UTC m=+28.460415350 (durationBeforeRetry 4s). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 10:40:04 crc kubenswrapper[4926]: E1122 10:40:04.158858 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-22 10:40:08.158849144 +0000 UTC m=+28.460454431 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 22 10:40:04 crc kubenswrapper[4926]: E1122 10:40:04.158977 4926 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 22 10:40:04 crc kubenswrapper[4926]: E1122 10:40:04.159058 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-22 10:40:08.159037079 +0000 UTC m=+28.460642406 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 22 10:40:04 crc kubenswrapper[4926]: I1122 10:40:04.189263 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container 
could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:04Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:04 crc kubenswrapper[4926]: I1122 10:40:04.200571 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:04 crc kubenswrapper[4926]: I1122 10:40:04.200609 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:04 crc kubenswrapper[4926]: I1122 10:40:04.200621 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:04 crc kubenswrapper[4926]: I1122 10:40:04.200639 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:04 crc kubenswrapper[4926]: I1122 10:40:04.200650 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:04Z","lastTransitionTime":"2025-11-22T10:40:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:40:04 crc kubenswrapper[4926]: I1122 10:40:04.228069 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-4sppr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f66b576d-83a6-4919-a918-7b075f35881e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a4a164ad2484a9c0b09c8e6b174448dedfe84a017ada69b19429d90621540080\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jwtrf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:01Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-4sppr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:04Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:04 crc kubenswrapper[4926]: I1122 10:40:04.259545 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 22 10:40:04 crc kubenswrapper[4926]: I1122 10:40:04.259615 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") 
" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 22 10:40:04 crc kubenswrapper[4926]: E1122 10:40:04.259743 4926 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 22 10:40:04 crc kubenswrapper[4926]: E1122 10:40:04.259772 4926 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 22 10:40:04 crc kubenswrapper[4926]: E1122 10:40:04.259784 4926 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 22 10:40:04 crc kubenswrapper[4926]: E1122 10:40:04.259794 4926 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 22 10:40:04 crc kubenswrapper[4926]: E1122 10:40:04.259816 4926 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 22 10:40:04 crc kubenswrapper[4926]: E1122 10:40:04.259829 4926 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 22 10:40:04 crc kubenswrapper[4926]: E1122 10:40:04.259840 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-22 10:40:08.259824437 +0000 UTC m=+28.561429724 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 22 10:40:04 crc kubenswrapper[4926]: E1122 10:40:04.259876 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-22 10:40:08.259861728 +0000 UTC m=+28.561467105 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 22 10:40:04 crc kubenswrapper[4926]: I1122 10:40:04.266683 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-wqf9b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"385427b5-fb45-42dd-8ff8-a4c7cdad6157\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://06207f52a9095cf1830a6ad91952c7efa774aa15a2fca873dc09b6b995e26401\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4rrk8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:00Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-wqf9b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:04Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:04 crc kubenswrapper[4926]: I1122 10:40:04.303053 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:04 crc kubenswrapper[4926]: I1122 10:40:04.303085 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:04 crc kubenswrapper[4926]: I1122 10:40:04.303093 4926 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Nov 22 10:40:04 crc kubenswrapper[4926]: I1122 10:40:04.303106 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:04 crc kubenswrapper[4926]: I1122 10:40:04.303115 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:04Z","lastTransitionTime":"2025-11-22T10:40:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:04 crc kubenswrapper[4926]: I1122 10:40:04.309866 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2bff8ee1644ff7cd9bee279e5c641b5d9c55e0546fbc7d1f2330e4e86aee0c37\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:04Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:04 crc kubenswrapper[4926]: I1122 10:40:04.405215 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:04 crc kubenswrapper[4926]: I1122 10:40:04.405274 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:04 crc kubenswrapper[4926]: I1122 10:40:04.405290 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" 
Nov 22 10:40:04 crc kubenswrapper[4926]: I1122 10:40:04.405312 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:04 crc kubenswrapper[4926]: I1122 10:40:04.405327 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:04Z","lastTransitionTime":"2025-11-22T10:40:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:04 crc kubenswrapper[4926]: I1122 10:40:04.508057 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:04 crc kubenswrapper[4926]: I1122 10:40:04.508107 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:04 crc kubenswrapper[4926]: I1122 10:40:04.508118 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:04 crc kubenswrapper[4926]: I1122 10:40:04.508139 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:04 crc kubenswrapper[4926]: I1122 10:40:04.508151 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:04Z","lastTransitionTime":"2025-11-22T10:40:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:04 crc kubenswrapper[4926]: I1122 10:40:04.581872 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 22 10:40:04 crc kubenswrapper[4926]: E1122 10:40:04.582105 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 22 10:40:04 crc kubenswrapper[4926]: I1122 10:40:04.583017 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 10:40:04 crc kubenswrapper[4926]: E1122 10:40:04.583165 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 22 10:40:04 crc kubenswrapper[4926]: I1122 10:40:04.610574 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:04 crc kubenswrapper[4926]: I1122 10:40:04.610610 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:04 crc kubenswrapper[4926]: I1122 10:40:04.610618 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:04 crc kubenswrapper[4926]: I1122 10:40:04.610632 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:04 crc kubenswrapper[4926]: I1122 10:40:04.610658 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:04Z","lastTransitionTime":"2025-11-22T10:40:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:04 crc kubenswrapper[4926]: I1122 10:40:04.713955 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:04 crc kubenswrapper[4926]: I1122 10:40:04.713995 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:04 crc kubenswrapper[4926]: I1122 10:40:04.714009 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:04 crc kubenswrapper[4926]: I1122 10:40:04.714027 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:04 crc kubenswrapper[4926]: I1122 10:40:04.714039 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:04Z","lastTransitionTime":"2025-11-22T10:40:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:40:04 crc kubenswrapper[4926]: I1122 10:40:04.764411 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-z69nr" event={"ID":"25bc94bb-a5d1-431c-9847-2f6a02997e25","Type":"ContainerStarted","Data":"19bceeb84e2cce918b75507fb0f9d5e4365fcd0d30da4491b83ed54b3c2d369a"} Nov 22 10:40:04 crc kubenswrapper[4926]: I1122 10:40:04.766799 4926 generic.go:334] "Generic (PLEG): container finished" podID="fa95257d-7464-4038-b2f3-aa795e4ac425" containerID="80eeb7fb332eda9b9472092de548160297c4abdf76a929904fcf3321e992c230" exitCode=0 Nov 22 10:40:04 crc kubenswrapper[4926]: I1122 10:40:04.766980 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-sr572" event={"ID":"fa95257d-7464-4038-b2f3-aa795e4ac425","Type":"ContainerDied","Data":"80eeb7fb332eda9b9472092de548160297c4abdf76a929904fcf3321e992c230"} Nov 22 10:40:04 crc kubenswrapper[4926]: I1122 10:40:04.781281 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:04Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:04 crc kubenswrapper[4926]: I1122 10:40:04.792604 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-4sppr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f66b576d-83a6-4919-a918-7b075f35881e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a4a164ad2484a9c0b09c8e6b174448dedfe84a017ada69b19429d90621540080\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jwtrf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:01Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-4sppr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify 
certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:04Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:04 crc kubenswrapper[4926]: I1122 10:40:04.802747 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-wqf9b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"385427b5-fb45-42dd-8ff8-a4c7cdad6157\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://06207f52a9095cf1830a6ad91952c7efa774aa15a2fca873dc09b6b995e26401\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4rrk8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:00Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-wqf9b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:04Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:04 crc kubenswrapper[4926]: I1122 10:40:04.814762 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2bff8ee1644ff7cd9bee279e5c641b5d9c55e0546fbc7d1f2330e4e86aee0c37\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:04Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:04 crc kubenswrapper[4926]: I1122 10:40:04.817207 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:04 crc kubenswrapper[4926]: I1122 10:40:04.817240 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:04 crc kubenswrapper[4926]: I1122 10:40:04.817249 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:04 crc kubenswrapper[4926]: I1122 10:40:04.817262 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:04 crc kubenswrapper[4926]: I1122 10:40:04.817271 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:04Z","lastTransitionTime":"2025-11-22T10:40:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:40:04 crc kubenswrapper[4926]: I1122 10:40:04.826319 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:04Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:04 crc kubenswrapper[4926]: I1122 10:40:04.844092 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-sr572" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fa95257d-7464-4038-b2f3-aa795e4ac425\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with incomplete status: [routeoverride-cni whereabouts-cni-bincopy 
whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0a7d49a66e96b6f535cae7cd657acf64164c67b52df055b133183ef25644863d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0a7d49a66e96b6f535cae7cd657acf64164c67b52df055b133183ef25644863d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://53833aaa04a836cea1e6539fd089b91d92bec1e9c1037d036a09fa83a95b45da\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://53833aaa04a836cea1e6539fd089b91d92bec1e9c1037d036a09fa83a95b45da\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:03Z\\\"}},\\\"volumeMounts\\\":[{\\\
"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://80eeb7fb332eda9b9472092de548160297c4abdf76a929904fcf3321e992c230\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://80eeb7fb332eda9b9472092de548160297c4abdf76a929904fcf3321e992c230\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:40:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serv
iceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:00Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-sr572\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:04Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:04 crc kubenswrapper[4926]: I1122 10:40:04.862675 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8bc26a84-5939-4708-97f7-36fb5b1d1f27\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:40Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:40Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bdc617bc3d316106b3e66df8521ae9165a90df081b92c4a6c167895006a667a4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8bdb9d01c359b36d5ea1517197ab1db0746267b371b5aaf6ae9af7157af6f76\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f92968d6a164ebcee515ead5d8704ab04430b65f29cec21c10cecf2b99079737\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e4845e8b99f2def354e4a055d407495a578db4994aef6fb1c70b91760bbf4e2a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e4845e8b99f2def354e4a055d407495a578db4994aef6fb1c70b91760bbf4e2a\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"message\\\":\\\"le observer\\\\nW1122 10:40:01.221686 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1122 10:40:01.221835 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1122 10:40:01.223195 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3742639324/tls.crt::/tmp/serving-cert-3742639324/tls.key\\\\\\\"\\\\nI1122 10:40:01.845350 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1122 10:40:01.848213 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1122 10:40:01.848231 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1122 10:40:01.848251 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1122 10:40:01.848258 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1122 10:40:01.855383 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1122 10:40:01.855412 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1122 10:40:01.855418 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1122 10:40:01.855423 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nI1122 10:40:01.855441 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1122 10:40:01.855456 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1122 10:40:01.855461 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1122 10:40:01.855464 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1122 10:40:01.857125 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T10:39:55Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://59001d5969924d417da69f7b385e8ee904bb5289914c6f4a58156a80f2553abd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:43Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e4fdad471fe7d7332fa8c1fc1e4779b934b140f9c808e0af44f05808c56b3e42\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e4fdad471fe7d7332fa8c1fc1e4779b934b140f9c808e0af44f05808c56b3e42\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:39:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:39:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:39:40Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:04Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:04 crc kubenswrapper[4926]: I1122 10:40:04.874422 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:04Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:04 crc kubenswrapper[4926]: I1122 10:40:04.886384 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d4977b14-85c3-4141-9b15-1768f09e8d27\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7f4461b3d09c647d71476b2c089ab47e93fe0e0efb27f8179e476ff3667447f6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5nwn7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fb54559825bae40317f4dbc23c5b4238c4681f9845053656d59b2d2b3e504e1a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5nwn7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:00Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-xr9nd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:04Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:04 crc kubenswrapper[4926]: I1122 10:40:04.913832 4926 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1f3bdd4c-e5c5-46f1-8f92-450d892ef2e2\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b68f6c08d1ac65e51054d798dcb1e6affbf5f114ea3848b544d3093168e44150\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://adceb29cfdba4f70773eb38f81b2291080a8a25e6d6ffe7fbbb037118e0ac29f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://29acb771444281287208f7c5aecfe78ed6e70fe2940be31d423c011878bee6c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-di
r\\\"}]},{\\\"containerID\\\":\\\"cri-o://c37c77ac89f0469ff68b3d5dffc2af7d7b6997b8975abe244e302baf0f5bbacc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ed65ad0463b5aa72d13756891af20691bff705a94c67a55d45f71af2a88b01e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2f4c96d7281eda14ad3971ee27f9f0f401d12ba8b72e32b65bd9d4935182f980\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2f4c96d7281eda14ad3971ee27f9f0f401d12ba8b72e32b65bd9d4935182f980\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:39:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:39:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ebad6d17a869f5f58a7c43300ceace96025b58ca9621cb6bc23b45d69fe471b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ebad6d17a869f5f58a7c43300ceace96025b58ca9621cb6bc23b45d69fe471b4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:39:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:39:42Z\\\"}}},{\\\"containerID\\\"
:\\\"cri-o://6075bb148352742b787b1e8df58fe76d06db89303a7c297d07446148532e92cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6075bb148352742b787b1e8df58fe76d06db89303a7c297d07446148532e92cb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:39:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:39:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:39:40Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:04Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:04 crc kubenswrapper[4926]: I1122 10:40:04.919266 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:04 crc kubenswrapper[4926]: I1122 10:40:04.919299 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:04 crc kubenswrapper[4926]: I1122 10:40:04.919307 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:04 crc kubenswrapper[4926]: I1122 10:40:04.919320 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:04 crc kubenswrapper[4926]: I1122 10:40:04.919328 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:04Z","lastTransitionTime":"2025-11-22T10:40:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:40:04 crc kubenswrapper[4926]: I1122 10:40:04.928770 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://20ee112074738980326d231b1680aa74c920ec797dbfe4752ebfdfb99695caeb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0a5e6c7d3f4dd4bbfdc58c50672c5187d32b136fb9e096c95e786193e1c773b9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:04Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:04 crc kubenswrapper[4926]: I1122 10:40:04.945621 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-z69nr" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"25bc94bb-a5d1-431c-9847-2f6a02997e25\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-op
envswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{
},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fde011e559f073f261a07a3e55c05d88d1b11a16b6858b74cb8ef79b97c266b1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36
cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fde011e559f073f261a07a3e55c05d88d1b11a16b6858b74cb8ef79b97c266b1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:00Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-z69nr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:04Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:04 crc kubenswrapper[4926]: I1122 10:40:04.956184 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://136423f9eeb872be0b685dcfee2a81e5f4133ad09607dc53fdfa64d70e87ceab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:04Z is after 
2025-08-24T17:21:41Z" Nov 22 10:40:04 crc kubenswrapper[4926]: I1122 10:40:04.966837 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-c6w2q" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"36de2843-6491-4c54-b624-c4a3d328c164\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://13dff3bd18b5adc45fb92f8207a6726011752f430271dabbae9f2e94011f1b08\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-52vzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"pod
IPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:00Z\\\"}}\" for pod \"openshift-multus\"/\"multus-c6w2q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:04Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:05 crc kubenswrapper[4926]: I1122 10:40:05.022408 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:05 crc kubenswrapper[4926]: I1122 10:40:05.022836 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:05 crc kubenswrapper[4926]: I1122 10:40:05.022849 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:05 crc kubenswrapper[4926]: I1122 10:40:05.022867 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:05 crc kubenswrapper[4926]: I1122 10:40:05.022877 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:05Z","lastTransitionTime":"2025-11-22T10:40:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:05 crc kubenswrapper[4926]: I1122 10:40:05.074785 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 22 10:40:05 crc kubenswrapper[4926]: I1122 10:40:05.077783 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 22 10:40:05 crc kubenswrapper[4926]: I1122 10:40:05.083617 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-controller-manager/kube-controller-manager-crc"] Nov 22 10:40:05 crc kubenswrapper[4926]: I1122 10:40:05.095720 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1f3bdd4c-e5c5-46f1-8f92-450d892ef2e2\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b68f6c08d1ac65e51054d798dcb1e6affbf5f114ea3848b544d3093168e44150\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://adceb29cfdba4f70773eb38f81b2291080a8a25e6d6ffe7fbbb037118e0ac29f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://29acb771444281287208f7c5aecfe78ed6e70fe2940be31d423c011878bee6c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c37c77ac89f0469ff68b3d5dffc2af7d7b6997b
8975abe244e302baf0f5bbacc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ed65ad0463b5aa72d13756891af20691bff705a94c67a55d45f71af2a88b01e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2f4c96d7281eda14ad3971ee27f9f0f401d12ba8b72e32b65bd9d4935182f980\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2f4c96d7281eda14ad3971ee27f9f0f401d12ba8b72e32b65bd9d4935182f980\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:39:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:39:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ebad6d17a869f5f58a7c43300ceace96025b58ca9621cb6bc23b45d69fe471b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ebad6d17a869f5f58a7c43300ceace96025b58ca9621cb6bc23b45d69fe471b4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:39:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:39:42Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://6075bb148352742b787b1e8df58fe76d06db89303a7c297d07446148532e92cb\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6075bb148352742b787b1e8df58fe76d06db89303a7c297d07446148532e92cb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:39:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:39:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:39:40Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:05Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:05 crc kubenswrapper[4926]: I1122 10:40:05.109154 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://20ee112074738980326d231b1680aa74c920ec797dbfe4752ebfdfb99695caeb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0a5e6c7d3f4dd4bbfdc58c50672c5187d32b136fb9e096c95e786193e1c773b9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36
cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:05Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:05 crc kubenswrapper[4926]: I1122 10:40:05.126004 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:05 crc kubenswrapper[4926]: I1122 10:40:05.126064 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:05 crc kubenswrapper[4926]: I1122 10:40:05.126073 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:05 crc kubenswrapper[4926]: I1122 10:40:05.126088 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:05 crc kubenswrapper[4926]: I1122 10:40:05.126101 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:05Z","lastTransitionTime":"2025-11-22T10:40:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:40:05 crc kubenswrapper[4926]: I1122 10:40:05.135301 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-z69nr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"25bc94bb-a5d1-431c-9847-2f6a02997e25\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":fa
lse,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\
\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fde011e559f073f261a07a3e55c05d88d1b11a16b6858b74cb
8ef79b97c266b1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fde011e559f073f261a07a3e55c05d88d1b11a16b6858b74cb8ef79b97c266b1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:00Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-z69nr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:05Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:05 crc kubenswrapper[4926]: I1122 10:40:05.149851 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://136423f9eeb872be0b685dcfee2a81e5f4133ad09607dc53fdfa64d70e87ceab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:05Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:05 crc kubenswrapper[4926]: I1122 10:40:05.166509 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-c6w2q" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"36de2843-6491-4c54-b624-c4a3d328c164\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://13dff3bd18b5adc45fb92f8207a6726011752f430271dabbae9f2e94011f1b08\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access
-52vzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:00Z\\\"}}\" for pod \"openshift-multus\"/\"multus-c6w2q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:05Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:05 crc kubenswrapper[4926]: I1122 10:40:05.177675 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-wqf9b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"385427b5-fb45-42dd-8ff8-a4c7cdad6157\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://06207f52a9095cf1830a6ad91952c7efa774aa15a2fca873dc09b6b995e26401\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4rrk8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:00Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-wqf9b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:05Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:05 crc kubenswrapper[4926]: I1122 10:40:05.191773 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" 
err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2bff8ee1644ff7cd9bee279e5c641b5d9c55e0546fbc7d1f2330e4e86aee0c37\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:05Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:05 crc kubenswrapper[4926]: I1122 10:40:05.208517 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be 
located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:05Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:05 crc kubenswrapper[4926]: I1122 10:40:05.228483 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:05 crc kubenswrapper[4926]: I1122 10:40:05.228510 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:05 crc kubenswrapper[4926]: I1122 10:40:05.228519 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:05 crc kubenswrapper[4926]: I1122 10:40:05.228531 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:05 crc kubenswrapper[4926]: I1122 10:40:05.228541 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:05Z","lastTransitionTime":"2025-11-22T10:40:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:40:05 crc kubenswrapper[4926]: I1122 10:40:05.248122 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:05Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:05 crc kubenswrapper[4926]: I1122 10:40:05.287338 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-4sppr" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f66b576d-83a6-4919-a918-7b075f35881e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a4a164ad2484a9c0b09c8e6b174448dedfe84a017ada69b19429d90621540080\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jwtrf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:01Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-4sppr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:05Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:05 crc kubenswrapper[4926]: I1122 10:40:05.331639 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:05 crc kubenswrapper[4926]: I1122 10:40:05.331699 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:05 crc kubenswrapper[4926]: I1122 10:40:05.331716 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:05 crc kubenswrapper[4926]: I1122 10:40:05.331739 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:05 crc kubenswrapper[4926]: I1122 10:40:05.331755 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:05Z","lastTransitionTime":"2025-11-22T10:40:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:05 crc kubenswrapper[4926]: I1122 10:40:05.366793 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8bc26a84-5939-4708-97f7-36fb5b1d1f27\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:40Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:40Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bdc617bc3d316106b3e66df8521ae9165a90df081b92c4a6c167895006a667a4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8bdb9d01c359b36d5ea1517197ab1db0746267b371b5aaf6ae9af7157af6f76\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f92968d6a164ebcee515ead5d8704ab04430b65f29cec21c10cecf2b99079737\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f
7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e4845e8b99f2def354e4a055d407495a578db4994aef6fb1c70b91760bbf4e2a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e4845e8b99f2def354e4a055d407495a578db4994aef6fb1c70b91760bbf4e2a\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"message\\\":\\\"le observer\\\\nW1122 10:40:01.221686 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1122 10:40:01.221835 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1122 10:40:01.223195 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3742639324/tls.crt::/tmp/serving-cert-3742639324/tls.key\\\\\\\"\\\\nI1122 10:40:01.845350 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1122 10:40:01.848213 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1122 10:40:01.848231 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1122 10:40:01.848251 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1122 10:40:01.848258 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1122 10:40:01.855383 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1122 10:40:01.855412 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1122 10:40:01.855418 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1122 10:40:01.855423 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nI1122 10:40:01.855441 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1122 10:40:01.855456 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1122 10:40:01.855461 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1122 10:40:01.855464 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1122 10:40:01.857125 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T10:39:55Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed 
container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://59001d5969924d417da69f7b385e8ee904bb5289914c6f4a58156a80f2553abd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:43Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e4fdad471fe7d7332fa8c1fc1e4779b934b140f9c808e0af44f05808c56b3e42\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e4fdad471fe7d7332fa8c1fc1e4779b934b140f9c808e0af44f05808c56b3e42\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:39:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:39:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:39:40Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:05Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:05 crc kubenswrapper[4926]: I1122 10:40:05.400075 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:05Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:05 crc kubenswrapper[4926]: I1122 10:40:05.419838 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d4977b14-85c3-4141-9b15-1768f09e8d27\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7f4461b3d09c647d71476b2c089ab47e93fe0e0efb27f8179e476ff3667447f6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5nwn7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fb54559825bae40317f4dbc23c5b4238c4681f9845053656d59b2d2b3e504e1a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5nwn7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:00Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-xr9nd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:05Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:05 crc kubenswrapper[4926]: I1122 10:40:05.435157 4926 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:05 crc kubenswrapper[4926]: I1122 10:40:05.435224 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:05 crc kubenswrapper[4926]: I1122 10:40:05.435242 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:05 crc kubenswrapper[4926]: I1122 10:40:05.435269 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:05 crc kubenswrapper[4926]: I1122 10:40:05.435292 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:05Z","lastTransitionTime":"2025-11-22T10:40:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:05 crc kubenswrapper[4926]: I1122 10:40:05.461066 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-sr572" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fa95257d-7464-4038-b2f3-aa795e4ac425\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with incomplete status: [routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0a7d49a66e96b6f535cae7cd657acf64164c67b52df055b133183ef25644863d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0a7d49a66e96b6f535cae7cd657acf64164c67b52df055b133183ef25644863d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://53833aaa04a836cea1e6539fd089b91d92bec1e9c1037d036a09fa83a95b45da\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://53833aaa04a836cea1e6539fd089b91d92bec1e9c1037d036a09fa83a95b45da\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://80eeb7fb332eda9b9472092de548160297c4abdf76a929904fcf3321e992c230\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://80eeb7fb332eda9b9472092de548160297c4abdf76a929904fcf3321e992c230\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:40:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/
cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:00Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-sr572\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:05Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:05 crc kubenswrapper[4926]: I1122 10:40:05.490912 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2bff8ee1644ff7cd9bee279e5c641b5d9c55e0546fbc7d1f2330e4e86aee0c37\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:05Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:05 crc kubenswrapper[4926]: I1122 10:40:05.532040 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:05Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:05 crc kubenswrapper[4926]: I1122 10:40:05.537825 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:05 crc kubenswrapper[4926]: I1122 10:40:05.537868 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:05 crc kubenswrapper[4926]: I1122 10:40:05.537876 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:05 crc kubenswrapper[4926]: I1122 10:40:05.537905 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:05 crc kubenswrapper[4926]: I1122 10:40:05.537917 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:05Z","lastTransitionTime":"2025-11-22T10:40:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:40:05 crc kubenswrapper[4926]: I1122 10:40:05.570409 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:05Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:05 crc kubenswrapper[4926]: I1122 10:40:05.581093 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 22 10:40:05 crc kubenswrapper[4926]: E1122 10:40:05.581211 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 22 10:40:05 crc kubenswrapper[4926]: I1122 10:40:05.606121 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-4sppr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f66b576d-83a6-4919-a918-7b075f35881e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a4a164ad2484a9c0b09c8e6b174448dedfe84a017ada69b19429d90621540080\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jwtrf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:01Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-4sppr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:05Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:05 crc kubenswrapper[4926]: I1122 10:40:05.640462 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:05 crc kubenswrapper[4926]: I1122 10:40:05.640519 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:05 crc kubenswrapper[4926]: I1122 10:40:05.640538 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:05 crc kubenswrapper[4926]: I1122 10:40:05.640560 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:05 crc 
kubenswrapper[4926]: I1122 10:40:05.640575 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:05Z","lastTransitionTime":"2025-11-22T10:40:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:05 crc kubenswrapper[4926]: I1122 10:40:05.652316 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"19e2ae79-41cf-4653-aa92-cc410145852b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5b4679473c2c11fecbd8496e1cbeb15b3a2e3fff5e5b5f127e0d1f1d7b3e5b70\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8066b411b165bb6f3aa9fac7c4f7d5687e58fba56ff2afd14c7a351ba3877483\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://96a716433f83f8041fc5f29383937a1b49d73701286739632a5c07a7e4a42077\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-man
ager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ff433d4efb9f77a47f79a39aa609b09a40a642f554e19b8fce0c84ad96a5e34\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:39:40Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:05Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:05 crc kubenswrapper[4926]: I1122 10:40:05.687177 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-wqf9b" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"385427b5-fb45-42dd-8ff8-a4c7cdad6157\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://06207f52a9095cf1830a6ad91952c7efa774aa15a2fca873dc09b6b995e26401\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4rrk8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:00Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-wqf9b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:05Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:05 crc kubenswrapper[4926]: I1122 10:40:05.733843 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:05Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:05 crc kubenswrapper[4926]: I1122 10:40:05.743387 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:05 crc kubenswrapper[4926]: I1122 10:40:05.743466 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:05 crc kubenswrapper[4926]: I1122 10:40:05.743487 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:05 crc kubenswrapper[4926]: I1122 10:40:05.743512 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:05 crc kubenswrapper[4926]: I1122 10:40:05.743530 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:05Z","lastTransitionTime":"2025-11-22T10:40:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:40:05 crc kubenswrapper[4926]: I1122 10:40:05.778835 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d4977b14-85c3-4141-9b15-1768f09e8d27\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7f4461b3d09c647d71476b2c089ab47e93fe0e0efb27f8179e476ff3667447f6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5nwn7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fb54559825bae40317f4dbc23c5b4238c4681f9845053656d59b2d2b3e504e1a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5nwn7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:00Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-xr9nd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:05Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:05 crc kubenswrapper[4926]: I1122 10:40:05.782724 4926 generic.go:334] "Generic (PLEG): container finished" podID="fa95257d-7464-4038-b2f3-aa795e4ac425" containerID="6293e4f193241ddbe2b4e93ceee670330e9e6cf86889fc9b049a7195da496a28" exitCode=0 Nov 22 10:40:05 crc kubenswrapper[4926]: I1122 10:40:05.782755 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-sr572" event={"ID":"fa95257d-7464-4038-b2f3-aa795e4ac425","Type":"ContainerDied","Data":"6293e4f193241ddbe2b4e93ceee670330e9e6cf86889fc9b049a7195da496a28"} Nov 22 10:40:05 crc kubenswrapper[4926]: I1122 10:40:05.819273 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-sr572" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fa95257d-7464-4038-b2f3-aa795e4ac425\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with incomplete status: [routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0a7d49a66e96b6f535cae7cd657acf64164c67b52df055b133183ef25644863d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0a7d49a66e96b6f535cae7cd657acf64164c67b52df055b133183ef25644863d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://53833aaa04a836cea1e6539fd089b91d92bec1e9c1037d036a09fa83a95b45da\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://53833aaa04a836cea1e6539fd089b91d92bec1e9c1037d036a09fa83a95b45da\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://80eeb7fb332eda9b9472092de548160297c4abdf76a929904fcf3321e992c230\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://80eeb7fb332eda9b9472092de548160297c4abdf76a929904fcf3321e992c230\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:40:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/
cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:00Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-sr572\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:05Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:05 crc kubenswrapper[4926]: I1122 10:40:05.846584 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:05 crc kubenswrapper[4926]: I1122 10:40:05.846620 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:05 crc kubenswrapper[4926]: I1122 10:40:05.846632 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:05 crc kubenswrapper[4926]: I1122 10:40:05.846648 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:05 crc kubenswrapper[4926]: I1122 10:40:05.846660 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:05Z","lastTransitionTime":"2025-11-22T10:40:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:40:05 crc kubenswrapper[4926]: I1122 10:40:05.853469 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8bc26a84-5939-4708-97f7-36fb5b1d1f27\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:40Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:40Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bdc617bc3d316106b3e66df8521ae9165a90df081b92c4a6c167895006a667a4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8bdb9d01c359b36d5ea1517197ab1db0746267b371b5aaf6ae9af7157af6f76\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f92968d6a164ebcee515ead5d8704ab04430b65f29cec21c10cecf2b99079737\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartC
ount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e4845e8b99f2def354e4a055d407495a578db4994aef6fb1c70b91760bbf4e2a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e4845e8b99f2def354e4a055d407495a578db4994aef6fb1c70b91760bbf4e2a\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"message\\\":\\\"le observer\\\\nW1122 10:40:01.221686 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1122 10:40:01.221835 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1122 10:40:01.223195 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3742639324/tls.crt::/tmp/serving-cert-3742639324/tls.key\\\\\\\"\\\\nI1122 10:40:01.845350 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1122 10:40:01.848213 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1122 10:40:01.848231 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1122 10:40:01.848251 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1122 10:40:01.848258 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1122 10:40:01.855383 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1122 10:40:01.855412 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1122 10:40:01.855418 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1122 10:40:01.855423 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nI1122 10:40:01.855441 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1122 10:40:01.855456 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1122 10:40:01.855461 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1122 10:40:01.855464 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1122 10:40:01.857125 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T10:39:55Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://59001d5969924d417da69f7b385e8ee904bb5289914c6f4a58156a80f2553abd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:43Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e4fdad471fe7d7332fa8c1fc1e4779b934b140f9c808e0af44f05808c56b3e42\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e4fdad471fe7d7332fa8c1fc1e4779b934b140f9c808e0af44f05808c56b3e42\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:39:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:39:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:39:40Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:05Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:05 crc kubenswrapper[4926]: I1122 10:40:05.895017 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://20ee112074738980326d231b1680aa74c920ec797dbfe4752ebfdfb99695caeb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0a5e6c7d3f4dd4bbfdc58c50672c5187d32b136fb9e096c95e786193e1c773b9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:05Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:05 crc kubenswrapper[4926]: I1122 10:40:05.936631 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-z69nr" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"25bc94bb-a5d1-431c-9847-2f6a02997e25\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-op
envswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{
},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fde011e559f073f261a07a3e55c05d88d1b11a16b6858b74cb8ef79b97c266b1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36
cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fde011e559f073f261a07a3e55c05d88d1b11a16b6858b74cb8ef79b97c266b1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:00Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-z69nr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:05Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:05 crc kubenswrapper[4926]: I1122 10:40:05.948946 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:05 crc kubenswrapper[4926]: I1122 10:40:05.948982 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:05 crc kubenswrapper[4926]: I1122 10:40:05.948993 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:05 crc kubenswrapper[4926]: I1122 10:40:05.949008 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:05 crc kubenswrapper[4926]: I1122 10:40:05.949020 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:05Z","lastTransitionTime":"2025-11-22T10:40:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:40:05 crc kubenswrapper[4926]: I1122 10:40:05.978979 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1f3bdd4c-e5c5-46f1-8f92-450d892ef2e2\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b68f6c08d1ac65e51054d798dcb1e6affbf5f114ea3848b544d3093168e44150\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://adceb29cfdba4f70773eb38f81b2291080a8a25e6d6ffe7fbbb037118e0ac29f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://29acb771444281287208f7c5aecfe78ed6e70fe2940be31d423c011878bee6c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\
":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c37c77ac89f0469ff68b3d5dffc2af7d7b6997b8975abe244e302baf0f5bbacc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ed65ad0463b5aa72d13756891af20691bff705a94c67a55d45f71af2a88b01e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2f4c96d7281eda14ad3971ee27f9f0f401d12ba8b72e32b65bd9d4935182f980\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2f4c96d7281eda14ad3971ee27f9f0f401d12ba8b72e32b65bd9d4935182f980\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:39:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:39:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ebad6d17a869f5f58a7c43300ceace96025b58ca9621cb6bc23b45d69fe471b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ebad6d17a869f5f58a7c43300ceace96025b58ca9621cb6bc23b45d69fe471b4\\\",\\\"exitCode\\\":0,\\\"finished
At\\\":\\\"2025-11-22T10:39:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:39:42Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://6075bb148352742b787b1e8df58fe76d06db89303a7c297d07446148532e92cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6075bb148352742b787b1e8df58fe76d06db89303a7c297d07446148532e92cb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:39:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:39:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:39:40Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:05Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:06 crc kubenswrapper[4926]: I1122 10:40:06.011606 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://136423f9eeb872be0b685dcfee2a81e5f4133ad09607dc53fdfa64d70e87ceab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:06Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:06 crc kubenswrapper[4926]: I1122 10:40:06.051657 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-c6w2q" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"36de2843-6491-4c54-b624-c4a3d328c164\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://13dff3bd18b5adc45fb92f8207a6726011752f430271dabbae9f2e94011f1b08\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-52vzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:00Z\\\"}}\" for pod \"openshift-multus\"/\"multus-c6w2q\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:06Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:06 crc kubenswrapper[4926]: I1122 10:40:06.051811 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:06 crc kubenswrapper[4926]: I1122 10:40:06.051852 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:06 crc kubenswrapper[4926]: I1122 10:40:06.051863 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:06 crc kubenswrapper[4926]: I1122 10:40:06.051878 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:06 crc kubenswrapper[4926]: I1122 10:40:06.051918 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:06Z","lastTransitionTime":"2025-11-22T10:40:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:06 crc kubenswrapper[4926]: I1122 10:40:06.089482 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d4977b14-85c3-4141-9b15-1768f09e8d27\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7f4461b3d09c647d71476b2c089ab47e93fe0e0efb27f8179e476ff3667447f6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5nwn7\\\",\\\"readOnly\\\":true,\
\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fb54559825bae40317f4dbc23c5b4238c4681f9845053656d59b2d2b3e504e1a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5nwn7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:00Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-xr9nd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:06Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:06 crc kubenswrapper[4926]: I1122 10:40:06.135506 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-sr572" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fa95257d-7464-4038-b2f3-aa795e4ac425\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0a7d49a66e96b6f535cae7cd657acf64164c67b52df055b133183ef25644863d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0a7d49a66e96b6f535cae7cd657acf64164c67b52df055b133183ef25644863d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://53833aaa04a836cea1e6539fd089b91d92bec1e9c1037d036a09fa83a95b45da\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://53833aaa04a836cea1e6539fd089b91d92bec1e9c1037d036a09fa83a95b45da\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://80eeb7fb332eda9b9472092de548160297c4abdf76a929904fcf3321e992c230\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://80eeb7fb332eda9b9472092de548160297c4abdf76a929904fcf3321e992c230\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:40:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6293e4f193241ddbe2b4e93ceee670330e9e6cf86889fc9b049a7195da496a28\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6293e4f193241ddbe2b4e93ceee670330e9e6cf86889fc9b049a7195da496a28\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:40:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disa
bled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:00Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-sr572\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:06Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:06 crc kubenswrapper[4926]: I1122 10:40:06.154504 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:06 crc kubenswrapper[4926]: I1122 10:40:06.154749 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:06 crc kubenswrapper[4926]: I1122 10:40:06.154914 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:06 crc kubenswrapper[4926]: I1122 10:40:06.155010 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:06 crc kubenswrapper[4926]: I1122 10:40:06.155139 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:06Z","lastTransitionTime":"2025-11-22T10:40:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:40:06 crc kubenswrapper[4926]: I1122 10:40:06.173231 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8bc26a84-5939-4708-97f7-36fb5b1d1f27\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:40Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:40Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bdc617bc3d316106b3e66df8521ae9165a90df081b92c4a6c167895006a667a4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8bdb9d01c359b36d5ea1517197ab1db0746267b371b5aaf6ae9af7157af6f76\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f92968d6a164ebcee515ead5d8704ab04430b65f29cec21c10cecf2b99079737\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartC
ount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e4845e8b99f2def354e4a055d407495a578db4994aef6fb1c70b91760bbf4e2a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e4845e8b99f2def354e4a055d407495a578db4994aef6fb1c70b91760bbf4e2a\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"message\\\":\\\"le observer\\\\nW1122 10:40:01.221686 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1122 10:40:01.221835 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1122 10:40:01.223195 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3742639324/tls.crt::/tmp/serving-cert-3742639324/tls.key\\\\\\\"\\\\nI1122 10:40:01.845350 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1122 10:40:01.848213 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1122 10:40:01.848231 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1122 10:40:01.848251 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1122 10:40:01.848258 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1122 10:40:01.855383 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1122 10:40:01.855412 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1122 10:40:01.855418 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1122 10:40:01.855423 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nI1122 10:40:01.855441 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1122 10:40:01.855456 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1122 10:40:01.855461 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1122 10:40:01.855464 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1122 10:40:01.857125 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T10:39:55Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://59001d5969924d417da69f7b385e8ee904bb5289914c6f4a58156a80f2553abd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:43Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e4fdad471fe7d7332fa8c1fc1e4779b934b140f9c808e0af44f05808c56b3e42\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e4fdad471fe7d7332fa8c1fc1e4779b934b140f9c808e0af44f05808c56b3e42\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:39:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:39:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:39:40Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:06Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:06 crc kubenswrapper[4926]: I1122 10:40:06.209166 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:06Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:06 crc kubenswrapper[4926]: I1122 10:40:06.257429 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:06 crc kubenswrapper[4926]: I1122 10:40:06.257673 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:06 crc kubenswrapper[4926]: I1122 10:40:06.257379 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-z69nr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"25bc94bb-a5d1-431c-9847-2f6a02997e25\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fde011e559f073f261a07a3e55c05d88d1b11a16b6858b74cb8ef79b97c266b1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fde011e559f073f261a07a3e55c05d88d1b11a16b6858b74cb8ef79b97c266b1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:00Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-z69nr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:06Z 
is after 2025-08-24T17:21:41Z" Nov 22 10:40:06 crc kubenswrapper[4926]: I1122 10:40:06.257740 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:06 crc kubenswrapper[4926]: I1122 10:40:06.258097 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:06 crc kubenswrapper[4926]: I1122 10:40:06.258112 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:06Z","lastTransitionTime":"2025-11-22T10:40:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:06 crc kubenswrapper[4926]: I1122 10:40:06.293090 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1f3bdd4c-e5c5-46f1-8f92-450d892ef2e2\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b68f6c08d1ac65e51054d798dcb1e6affbf5f114ea3848b544d3093168e44150\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://adceb29cfdba4f70773eb38f81b2291080a8a25e6d6ffe7fbbb037118e0ac29f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resour
ces\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://29acb771444281287208f7c5aecfe78ed6e70fe2940be31d423c011878bee6c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c37c77ac89f0469ff68b3d5dffc2af7d7b6997b8975abe244e302baf0f5bbacc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ed65ad0463b5aa72d13756891af20691bff705a94c67a55d45f71af2a88b01e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2f4c96d7281eda14ad3971ee27f9f0f401d12ba8b72e32b65bd9d4935182f980\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2f4c96d7281eda14ad3971ee27f9f0f401d12ba8b72e32b65bd9d4935182f980\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:39:41Z\\\",\\\"reason\\\":\\\"Comple
ted\\\",\\\"startedAt\\\":\\\"2025-11-22T10:39:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ebad6d17a869f5f58a7c43300ceace96025b58ca9621cb6bc23b45d69fe471b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ebad6d17a869f5f58a7c43300ceace96025b58ca9621cb6bc23b45d69fe471b4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:39:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:39:42Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://6075bb148352742b787b1e8df58fe76d06db89303a7c297d07446148532e92cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6075bb148352742b787b1e8df58fe76d06db89303a7c297d07446148532e92cb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:39:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:39:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:39:40Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:06Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:06 crc kubenswrapper[4926]: I1122 10:40:06.332876 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://20ee112074738980326d231b1680aa74c920ec797dbfe4752ebfdfb99695caeb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0a5e6c7d3f4dd4bbfdc58c50672c5187d32b136fb9e096c95e786193e1c773b9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:06Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:06 crc kubenswrapper[4926]: I1122 10:40:06.360461 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:06 crc kubenswrapper[4926]: I1122 10:40:06.360779 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:06 crc kubenswrapper[4926]: I1122 10:40:06.360929 4926 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Nov 22 10:40:06 crc kubenswrapper[4926]: I1122 10:40:06.361028 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:06 crc kubenswrapper[4926]: I1122 10:40:06.361104 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:06Z","lastTransitionTime":"2025-11-22T10:40:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:06 crc kubenswrapper[4926]: I1122 10:40:06.370191 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://136423f9eeb872be0b685dcfee2a81e5f4133ad09607dc53fdfa64d70e87ceab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:06Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:06 crc kubenswrapper[4926]: I1122 10:40:06.411207 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-c6w2q" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"36de2843-6491-4c54-b624-c4a3d328c164\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://13dff3bd18b5adc45fb92f8207a6726011752f430271dabbae9f2e94011f1b08\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-52vzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:00Z\\\"}}\" for pod \"openshift-multus\"/\"multus-c6w2q\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:06Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:06 crc kubenswrapper[4926]: I1122 10:40:06.452540 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:06Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:06 crc kubenswrapper[4926]: I1122 10:40:06.462860 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:06 crc kubenswrapper[4926]: I1122 10:40:06.462945 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:06 crc kubenswrapper[4926]: I1122 10:40:06.462960 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:06 crc kubenswrapper[4926]: I1122 10:40:06.462983 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:06 crc kubenswrapper[4926]: I1122 10:40:06.462999 4926 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:06Z","lastTransitionTime":"2025-11-22T10:40:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:06 crc kubenswrapper[4926]: I1122 10:40:06.492683 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:06Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:06 crc kubenswrapper[4926]: I1122 10:40:06.528347 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-4sppr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f66b576d-83a6-4919-a918-7b075f35881e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a4a164ad2484a9c0b09c8e6b174448dedfe84a017ada69b19429d90621540080\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jwtrf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:01Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-4sppr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify 
certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:06Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:06 crc kubenswrapper[4926]: I1122 10:40:06.564755 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:06 crc kubenswrapper[4926]: I1122 10:40:06.564792 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:06 crc kubenswrapper[4926]: I1122 10:40:06.564800 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:06 crc kubenswrapper[4926]: I1122 10:40:06.564816 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:06 crc kubenswrapper[4926]: I1122 10:40:06.564825 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:06Z","lastTransitionTime":"2025-11-22T10:40:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:06 crc kubenswrapper[4926]: I1122 10:40:06.569996 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"19e2ae79-41cf-4653-aa92-cc410145852b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5b4679473c2c11fecbd8496e1cbeb15b3a2e3fff5e5b5f127e0d1f1d7b3e5b70\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8066b411b165bb6f3aa9fac7c4f7d5687e58fba56ff2afd14c7a351ba3877483\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35
825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://96a716433f83f8041fc5f29383937a1b49d73701286739632a5c07a7e4a42077\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ff433d4efb9f77a47f79a39aa609b09a40a642f554e19b8fce0c84ad96a5e34\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:39:40Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:06Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:06 crc kubenswrapper[4926]: I1122 10:40:06.580962 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 10:40:06 crc kubenswrapper[4926]: I1122 10:40:06.580980 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 22 10:40:06 crc kubenswrapper[4926]: E1122 10:40:06.581097 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 22 10:40:06 crc kubenswrapper[4926]: E1122 10:40:06.581188 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 22 10:40:06 crc kubenswrapper[4926]: I1122 10:40:06.608365 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-wqf9b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"385427b5-fb45-42dd-8ff8-a4c7cdad6157\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://06207f52a9095cf1830a6ad91952c7efa774aa15a2fca873dc09b6b995e26401\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4rrk8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:00Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-wqf9b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:06Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:06 crc kubenswrapper[4926]: I1122 10:40:06.648948 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2bff8ee1644ff7cd9bee279e5c641b5d9c55e0546fbc7d1f2330e4e86aee0c37\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:06Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:06 crc kubenswrapper[4926]: I1122 10:40:06.667220 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:06 crc kubenswrapper[4926]: I1122 10:40:06.667268 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:06 crc kubenswrapper[4926]: I1122 10:40:06.667283 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:06 crc kubenswrapper[4926]: I1122 10:40:06.667302 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:06 crc kubenswrapper[4926]: I1122 10:40:06.667314 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:06Z","lastTransitionTime":"2025-11-22T10:40:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false 
reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:06 crc kubenswrapper[4926]: I1122 10:40:06.771004 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:06 crc kubenswrapper[4926]: I1122 10:40:06.771340 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:06 crc kubenswrapper[4926]: I1122 10:40:06.771350 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:06 crc kubenswrapper[4926]: I1122 10:40:06.771363 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:06 crc kubenswrapper[4926]: I1122 10:40:06.771372 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:06Z","lastTransitionTime":"2025-11-22T10:40:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:06 crc kubenswrapper[4926]: I1122 10:40:06.789289 4926 generic.go:334] "Generic (PLEG): container finished" podID="fa95257d-7464-4038-b2f3-aa795e4ac425" containerID="6ffd06bbdeee7c23dde48a03a975b5a86b258d8f95281abd78c8ce459155bb3f" exitCode=0 Nov 22 10:40:06 crc kubenswrapper[4926]: I1122 10:40:06.789338 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-sr572" event={"ID":"fa95257d-7464-4038-b2f3-aa795e4ac425","Type":"ContainerDied","Data":"6ffd06bbdeee7c23dde48a03a975b5a86b258d8f95281abd78c8ce459155bb3f"} Nov 22 10:40:06 crc kubenswrapper[4926]: I1122 10:40:06.811341 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1f3bdd4c-e5c5-46f1-8f92-450d892ef2e2\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b68f6c08d1ac65e51054d798dcb1e6affbf5f114ea3848b544d3093168e44150\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://adceb29cfdba4f70773eb38f81b2291080a8a25e6d6ffe7fbbb037118e0ac29f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://29acb771444281287208f7c5aecfe78ed6e70fe2940be31d423c011878bee6c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c37c77ac89f0469ff68b3d5dffc2af7d7b6997b
8975abe244e302baf0f5bbacc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ed65ad0463b5aa72d13756891af20691bff705a94c67a55d45f71af2a88b01e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2f4c96d7281eda14ad3971ee27f9f0f401d12ba8b72e32b65bd9d4935182f980\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2f4c96d7281eda14ad3971ee27f9f0f401d12ba8b72e32b65bd9d4935182f980\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:39:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:39:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ebad6d17a869f5f58a7c43300ceace96025b58ca9621cb6bc23b45d69fe471b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ebad6d17a869f5f58a7c43300ceace96025b58ca9621cb6bc23b45d69fe471b4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:39:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:39:42Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://6075bb148352742b787b1e8df58fe76d06db89303a7c297d07446148532e92cb\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6075bb148352742b787b1e8df58fe76d06db89303a7c297d07446148532e92cb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:39:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:39:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:39:40Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:06Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:06 crc kubenswrapper[4926]: I1122 10:40:06.825104 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://20ee112074738980326d231b1680aa74c920ec797dbfe4752ebfdfb99695caeb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0a5e6c7d3f4dd4bbfdc58c50672c5187d32b136fb9e096c95e786193e1c773b9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36
cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:06Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:06 crc kubenswrapper[4926]: I1122 10:40:06.842777 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-z69nr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"25bc94bb-a5d1-431c-9847-2f6a02997e25\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fde011e559f073f261a07a3e55c05d88d1b11a16b6858b74cb8ef79b97c266b1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fde011e559f073f261a07a3e55c05d88d1b11a16b6858b74cb8ef79b97c266b1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:00Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-z69nr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:06Z 
is after 2025-08-24T17:21:41Z" Nov 22 10:40:06 crc kubenswrapper[4926]: I1122 10:40:06.853979 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://136423f9eeb872be0b685dcfee2a81e5f4133ad09607dc53fdfa64d70e87ceab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:06Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:06 crc kubenswrapper[4926]: I1122 10:40:06.869187 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-c6w2q" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"36de2843-6491-4c54-b624-c4a3d328c164\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://13dff3bd18b5adc45fb92f8207a6726011752f430271dabbae9f2e94011f1b08\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-52vzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:00Z\\\"}}\" for pod \"openshift-multus\"/\"multus-c6w2q\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:06Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:06 crc kubenswrapper[4926]: I1122 10:40:06.873966 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:06 crc kubenswrapper[4926]: I1122 10:40:06.874046 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:06 crc kubenswrapper[4926]: I1122 10:40:06.874057 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:06 crc kubenswrapper[4926]: I1122 10:40:06.874074 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:06 crc kubenswrapper[4926]: I1122 10:40:06.874086 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:06Z","lastTransitionTime":"2025-11-22T10:40:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:06 crc kubenswrapper[4926]: I1122 10:40:06.886351 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-4sppr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f66b576d-83a6-4919-a918-7b075f35881e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a4a164ad2484a9c0b09c8e6b174448dedfe84a017ada69b19429d90621540080\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jwtrf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"h
ostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:01Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-4sppr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:06Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:06 crc kubenswrapper[4926]: I1122 10:40:06.936749 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"19e2ae79-41cf-4653-aa92-cc410145852b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5b4679473c2c11fecbd8496e1cbeb15b3a2e3fff5e5b5f127e0d1f1d7b3e5b70\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8066b411b165bb6f3aa9fac7c4f7d5687e58fba56ff2afd14c7a351ba3877483\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://96a716433f83f8041fc5f29383937a1b49d73701286739632a5c07a7e4a42077\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256
:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ff433d4efb9f77a47f79a39aa609b09a40a642f554e19b8fce0c84ad96a5e34\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:39:40Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:06Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:06 crc kubenswrapper[4926]: I1122 10:40:06.972331 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-wqf9b" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"385427b5-fb45-42dd-8ff8-a4c7cdad6157\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://06207f52a9095cf1830a6ad91952c7efa774aa15a2fca873dc09b6b995e26401\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4rrk8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:00Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-wqf9b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:06Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:06 crc kubenswrapper[4926]: I1122 10:40:06.979496 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:06 crc kubenswrapper[4926]: I1122 10:40:06.979518 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:06 crc kubenswrapper[4926]: I1122 10:40:06.979525 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:06 crc kubenswrapper[4926]: I1122 10:40:06.979538 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:06 crc kubenswrapper[4926]: I1122 10:40:06.979547 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:06Z","lastTransitionTime":"2025-11-22T10:40:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: 
no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:07 crc kubenswrapper[4926]: I1122 10:40:07.007987 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2bff8ee1644ff7cd9bee279e5c641b5d9c55e0546fbc7d1f2330e4e86aee0c37\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:07Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:07 crc kubenswrapper[4926]: I1122 10:40:07.049281 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:07Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:07 crc kubenswrapper[4926]: I1122 10:40:07.081807 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:07 crc kubenswrapper[4926]: I1122 10:40:07.081842 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:07 crc kubenswrapper[4926]: I1122 10:40:07.081854 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:07 crc kubenswrapper[4926]: I1122 10:40:07.081869 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:07 crc kubenswrapper[4926]: I1122 10:40:07.081904 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:07Z","lastTransitionTime":"2025-11-22T10:40:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:40:07 crc kubenswrapper[4926]: I1122 10:40:07.094568 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:07Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:07 crc kubenswrapper[4926]: I1122 10:40:07.134612 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8bc26a84-5939-4708-97f7-36fb5b1d1f27\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:40Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:40Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bdc617bc3d316106b3e66df8521ae9165a90df081b92c4a6c167895006a667a4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8bdb9d01c359b36d5ea1517197ab1db0746267b371b5aaf6ae9af7157af6f76\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f92968d6a164ebcee515ead5d8704ab04430b65f29cec21c10cecf2b99079737\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e4845e8b99f2def354e4a055d407495a578db4994aef6fb1c70b91760bbf4e2a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e4845e8b99f2def354e4a055d407495a578db4994aef6fb1c70b91760bbf4e2a\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"message\\\":\\\"le observer\\\\nW1122 10:40:01.221686 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1122 10:40:01.221835 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1122 10:40:01.223195 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3742639324/tls.crt::/tmp/serving-cert-3742639324/tls.key\\\\\\\"\\\\nI1122 10:40:01.845350 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1122 10:40:01.848213 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1122 10:40:01.848231 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1122 10:40:01.848251 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1122 10:40:01.848258 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1122 10:40:01.855383 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1122 10:40:01.855412 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1122 10:40:01.855418 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1122 10:40:01.855423 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nI1122 10:40:01.855441 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1122 10:40:01.855456 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1122 10:40:01.855461 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1122 10:40:01.855464 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1122 10:40:01.857125 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T10:39:55Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://59001d5969924d417da69f7b385e8ee904bb5289914c6f4a58156a80f2553abd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:43Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e4fdad471fe7d7332fa8c1fc1e4779b934b140f9c808e0af44f05808c56b3e42\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e4fdad471fe7d7332fa8c1fc1e4779b934b140f9c808e0af44f05808c56b3e42\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:39:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:39:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:39:40Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:07Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:07 crc kubenswrapper[4926]: I1122 10:40:07.174475 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:07Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:07 crc kubenswrapper[4926]: I1122 10:40:07.185192 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:07 crc kubenswrapper[4926]: I1122 10:40:07.185235 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:07 crc kubenswrapper[4926]: I1122 10:40:07.185244 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:07 crc kubenswrapper[4926]: I1122 10:40:07.185263 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:07 crc kubenswrapper[4926]: I1122 10:40:07.185273 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:07Z","lastTransitionTime":"2025-11-22T10:40:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:40:07 crc kubenswrapper[4926]: I1122 10:40:07.209694 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d4977b14-85c3-4141-9b15-1768f09e8d27\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7f4461b3d09c647d71476b2c089ab47e93fe0e0efb27f8179e476ff3667447f6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5nwn7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fb54559825bae40317f4dbc23c5b4238c4681f9845053656d59b2d2b3e504e1a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5nwn7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:00Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-xr9nd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:07Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:07 crc kubenswrapper[4926]: I1122 10:40:07.254451 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-sr572" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fa95257d-7464-4038-b2f3-aa795e4ac425\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0a7d49a66e96b6f535cae7cd657acf64164c67b52df055b133183ef25644863d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0a7d49a66e96b6f535cae7cd657acf64164c67b52df055b133183ef25644863d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":
true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://53833aaa04a836cea1e6539fd089b91d92bec1e9c1037d036a09fa83a95b45da\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://53833aaa04a836cea1e6539fd089b91d92bec1e9c1037d036a09fa83a95b45da\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://80eeb7fb332eda9b9472092de548160297c4abdf76a929904fcf3321e992c230\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://80eeb7fb332eda9b9472092de548160297c4abdf76a929904fcf3321e992c230\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:40:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6293e4f193241ddbe2b4e93ceee670330e9e6cf86889fc9b049a7195da496a28\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\
"cri-o://6293e4f193241ddbe2b4e93ceee670330e9e6cf86889fc9b049a7195da496a28\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:40:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6ffd06bbdeee7c23dde48a03a975b5a86b258d8f95281abd78c8ce459155bb3f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6ffd06bbdeee7c23dde48a03a975b5a86b258d8f95281abd78c8ce459155bb3f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:40:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:00Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-sr572\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:07Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:07 crc kubenswrapper[4926]: I1122 10:40:07.288839 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:07 crc kubenswrapper[4926]: I1122 10:40:07.288910 4926 
kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:07 crc kubenswrapper[4926]: I1122 10:40:07.288928 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:07 crc kubenswrapper[4926]: I1122 10:40:07.288950 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:07 crc kubenswrapper[4926]: I1122 10:40:07.288966 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:07Z","lastTransitionTime":"2025-11-22T10:40:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:07 crc kubenswrapper[4926]: I1122 10:40:07.391779 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:07 crc kubenswrapper[4926]: I1122 10:40:07.391815 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:07 crc kubenswrapper[4926]: I1122 10:40:07.391824 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:07 crc kubenswrapper[4926]: I1122 10:40:07.391843 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:07 crc kubenswrapper[4926]: I1122 10:40:07.391858 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:07Z","lastTransitionTime":"2025-11-22T10:40:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:07 crc kubenswrapper[4926]: I1122 10:40:07.494954 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:07 crc kubenswrapper[4926]: I1122 10:40:07.495031 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:07 crc kubenswrapper[4926]: I1122 10:40:07.495044 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:07 crc kubenswrapper[4926]: I1122 10:40:07.495067 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:07 crc kubenswrapper[4926]: I1122 10:40:07.495079 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:07Z","lastTransitionTime":"2025-11-22T10:40:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:07 crc kubenswrapper[4926]: I1122 10:40:07.581031 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 22 10:40:07 crc kubenswrapper[4926]: E1122 10:40:07.581267 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 22 10:40:07 crc kubenswrapper[4926]: I1122 10:40:07.599263 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:07 crc kubenswrapper[4926]: I1122 10:40:07.599353 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:07 crc kubenswrapper[4926]: I1122 10:40:07.599378 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:07 crc kubenswrapper[4926]: I1122 10:40:07.599410 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:07 crc kubenswrapper[4926]: I1122 10:40:07.599437 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:07Z","lastTransitionTime":"2025-11-22T10:40:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:07 crc kubenswrapper[4926]: I1122 10:40:07.702331 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:07 crc kubenswrapper[4926]: I1122 10:40:07.702384 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:07 crc kubenswrapper[4926]: I1122 10:40:07.702394 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:07 crc kubenswrapper[4926]: I1122 10:40:07.702410 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:07 crc kubenswrapper[4926]: I1122 10:40:07.702419 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:07Z","lastTransitionTime":"2025-11-22T10:40:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:40:07 crc kubenswrapper[4926]: I1122 10:40:07.804656 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:07 crc kubenswrapper[4926]: I1122 10:40:07.805081 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:07 crc kubenswrapper[4926]: I1122 10:40:07.805152 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:07 crc kubenswrapper[4926]: I1122 10:40:07.805180 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:07 crc kubenswrapper[4926]: I1122 10:40:07.805197 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:07Z","lastTransitionTime":"2025-11-22T10:40:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:07 crc kubenswrapper[4926]: I1122 10:40:07.807208 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-z69nr" event={"ID":"25bc94bb-a5d1-431c-9847-2f6a02997e25","Type":"ContainerStarted","Data":"ffc0bf6be79f2bd37f0c4d453c60991c96473d765cdb8012051321114120f194"} Nov 22 10:40:07 crc kubenswrapper[4926]: I1122 10:40:07.807641 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-z69nr" Nov 22 10:40:07 crc kubenswrapper[4926]: I1122 10:40:07.808019 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-z69nr" Nov 22 10:40:07 crc kubenswrapper[4926]: I1122 10:40:07.808241 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-z69nr" Nov 22 10:40:07 crc kubenswrapper[4926]: I1122 10:40:07.816838 4926 generic.go:334] "Generic (PLEG): container finished" podID="fa95257d-7464-4038-b2f3-aa795e4ac425" containerID="ff5dbca8ebeaee6449c5eadf548e32151cc59875e405a574c1f2e0677b40bb19" exitCode=0 Nov 22 10:40:07 crc kubenswrapper[4926]: I1122 10:40:07.816938 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-sr572" event={"ID":"fa95257d-7464-4038-b2f3-aa795e4ac425","Type":"ContainerDied","Data":"ff5dbca8ebeaee6449c5eadf548e32151cc59875e405a574c1f2e0677b40bb19"} Nov 22 10:40:07 crc kubenswrapper[4926]: I1122 10:40:07.824646 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://136423f9eeb872be0b685dcfee2a81e5f4133ad09607dc53fdfa64d70e87ceab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:07Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:07 crc kubenswrapper[4926]: I1122 10:40:07.843821 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-c6w2q" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"36de2843-6491-4c54-b624-c4a3d328c164\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://13dff3bd18b5adc45fb92f8207a6726011752f430271dabbae9f2e94011f1b08\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-52vzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:00Z\\\"}}\" for pod \"openshift-multus\"/\"multus-c6w2q\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:07Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:07 crc kubenswrapper[4926]: I1122 10:40:07.845062 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-z69nr" Nov 22 10:40:07 crc kubenswrapper[4926]: I1122 10:40:07.845480 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-z69nr" Nov 22 10:40:07 crc kubenswrapper[4926]: I1122 10:40:07.860289 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"19e2ae79-41cf-4653-aa92-cc410145852b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5b4679473c2c11fecbd8496e1cbeb15b3a2e3fff5e5b5f127e0d1f1d7b3e5b70\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8066b411b165bb6f3aa9fac7c4f7d5687e58fba56ff2afd14c7a351ba3877483\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://96a716433f83f8041fc5f29383937a1b49d73701286739632a5c07a7e4a42077\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-ope
rator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ff433d4efb9f77a47f79a39aa609b09a40a642f554e19b8fce0c84ad96a5e34\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:39:40Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:07Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:07 crc kubenswrapper[4926]: I1122 10:40:07.873853 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-wqf9b" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"385427b5-fb45-42dd-8ff8-a4c7cdad6157\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://06207f52a9095cf1830a6ad91952c7efa774aa15a2fca873dc09b6b995e26401\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4rrk8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:00Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-wqf9b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:07Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:07 crc kubenswrapper[4926]: I1122 10:40:07.889055 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2bff8ee1644ff7cd9bee279e5c641b5d9c55e0546fbc7d1f2330e4e86aee0c37\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:07Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:07 crc kubenswrapper[4926]: I1122 10:40:07.904854 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was 
deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:07Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:07 crc kubenswrapper[4926]: I1122 10:40:07.908485 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:07 crc kubenswrapper[4926]: I1122 10:40:07.908532 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:07 crc kubenswrapper[4926]: I1122 10:40:07.908549 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:07 crc kubenswrapper[4926]: I1122 10:40:07.908572 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:07 crc kubenswrapper[4926]: I1122 10:40:07.908586 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:07Z","lastTransitionTime":"2025-11-22T10:40:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:40:07 crc kubenswrapper[4926]: I1122 10:40:07.917613 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:07Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:07 crc kubenswrapper[4926]: I1122 10:40:07.927606 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-4sppr" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f66b576d-83a6-4919-a918-7b075f35881e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a4a164ad2484a9c0b09c8e6b174448dedfe84a017ada69b19429d90621540080\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jwtrf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:01Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-4sppr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:07Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:07 crc kubenswrapper[4926]: I1122 10:40:07.940244 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8bc26a84-5939-4708-97f7-36fb5b1d1f27\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:40Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:40Z\\\",\\\"message\\\":\\\"containers with 
unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bdc617bc3d316106b3e66df8521ae9165a90df081b92c4a6c167895006a667a4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8bdb9d01c359b36d5ea1517197ab1db0746267b371b5aaf6ae9af7157af6f76\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f92968d6a164ebcee515ead5d8704ab04430b65f29cec21c10cecf2b99079737\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e4845e8b99f2def354e4a055d407495a578db4994aef6fb1c70b91760bbf4e2a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e4845e8b99f2def354e4a055d407495a578db4994aef6fb1c70b91760bbf4e2a\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"message\\\":\\\"le observer\\\\nW1122 10:40:01.221686 1 builder.go:272] unable to get owner 
reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1122 10:40:01.221835 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1122 10:40:01.223195 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3742639324/tls.crt::/tmp/serving-cert-3742639324/tls.key\\\\\\\"\\\\nI1122 10:40:01.845350 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1122 10:40:01.848213 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1122 10:40:01.848231 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1122 10:40:01.848251 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1122 10:40:01.848258 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1122 10:40:01.855383 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1122 10:40:01.855412 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1122 10:40:01.855418 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1122 10:40:01.855423 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nI1122 10:40:01.855441 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1122 10:40:01.855456 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1122 10:40:01.855461 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1122 10:40:01.855464 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1122 10:40:01.857125 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T10:39:55Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://59001d5969924d417da69f7b385e8ee904bb5289914c6f4a58156a80f2553abd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:43Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e4fdad471fe7d7332fa8c1fc1e4779b934b140f9c808e0af44f05808c56b3e42\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e4fdad471fe7d7332fa8c1fc1e4779b934b140f9c808e0af44f05808c56b3e42\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:39:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:39:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:39:40Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:07Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:07 crc kubenswrapper[4926]: I1122 10:40:07.954491 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:07Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:07 crc kubenswrapper[4926]: I1122 10:40:07.968230 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d4977b14-85c3-4141-9b15-1768f09e8d27\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7f4461b3d09c647d71476b2c089ab47e93fe0e0efb27f8179e476ff3667447f6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5nwn7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fb54559825bae40317f4dbc23c5b4238c4681f9845053656d59b2d2b3e504e1a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5nwn7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:00Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-xr9nd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:07Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:07 crc kubenswrapper[4926]: I1122 10:40:07.983700 4926 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-additional-cni-plugins-sr572" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fa95257d-7464-4038-b2f3-aa795e4ac425\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0a7d49a66e96b6f535cae7cd657acf64164c67b52df055b133183ef25644863d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0a7d49a66e96b6f535cae7cd657acf64164c67b52df055b133183ef25644863d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://53833aaa04a836cea1e6539fd089b91
d92bec1e9c1037d036a09fa83a95b45da\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://53833aaa04a836cea1e6539fd089b91d92bec1e9c1037d036a09fa83a95b45da\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://80eeb7fb332eda9b9472092de548160297c4abdf76a929904fcf3321e992c230\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://80eeb7fb332eda9b9472092de548160297c4abdf76a929904fcf3321e992c230\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:40:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6293e4f193241ddbe2b4e93ceee670330e9e6cf86889fc9b049a7195da496a28\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6293e4f193241ddbe2b4e93ceee670330e9e6cf86889fc9b049a7195da496a28\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:40:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\
\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6ffd06bbdeee7c23dde48a03a975b5a86b258d8f95281abd78c8ce459155bb3f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6ffd06bbdeee7c23dde48a03a975b5a86b258d8f95281abd78c8ce459155bb3f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:40:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:00Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-sr572\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:07Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:08 crc kubenswrapper[4926]: I1122 10:40:08.008831 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1f3bdd4c-e5c5-46f1-8f92-450d892ef2e2\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b68f6c08d1ac65e51054d798dcb1e6affbf5f114ea3848b544d3093168e44150\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://adceb29cfdba4f70773eb38f81b2291080a8a25e6d6ffe7fbbb037118e0ac29f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://29acb771444281287208f7c5aecfe78ed6e70fe2940be31d423c011878bee6c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c37c77ac89f0469ff68b3d5dffc2af7d7b6997b
8975abe244e302baf0f5bbacc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ed65ad0463b5aa72d13756891af20691bff705a94c67a55d45f71af2a88b01e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2f4c96d7281eda14ad3971ee27f9f0f401d12ba8b72e32b65bd9d4935182f980\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2f4c96d7281eda14ad3971ee27f9f0f401d12ba8b72e32b65bd9d4935182f980\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:39:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:39:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ebad6d17a869f5f58a7c43300ceace96025b58ca9621cb6bc23b45d69fe471b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ebad6d17a869f5f58a7c43300ceace96025b58ca9621cb6bc23b45d69fe471b4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:39:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:39:42Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://6075bb148352742b787b1e8df58fe76d06db89303a7c297d07446148532e92cb\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6075bb148352742b787b1e8df58fe76d06db89303a7c297d07446148532e92cb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:39:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:39:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:39:40Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:08Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:08 crc kubenswrapper[4926]: I1122 10:40:08.013063 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:08 crc kubenswrapper[4926]: I1122 10:40:08.013101 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:08 crc kubenswrapper[4926]: I1122 10:40:08.013112 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:08 crc kubenswrapper[4926]: I1122 10:40:08.013129 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:08 crc kubenswrapper[4926]: I1122 10:40:08.013140 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:08Z","lastTransitionTime":"2025-11-22T10:40:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:40:08 crc kubenswrapper[4926]: I1122 10:40:08.022596 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://20ee112074738980326d231b1680aa74c920ec797dbfe4752ebfdfb99695caeb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0a5e6c7d3f4dd4bbfdc58c50672c5187d32b136fb9e096c95e786193e1c773b9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:08Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:08 crc kubenswrapper[4926]: I1122 10:40:08.043348 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-z69nr" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"25bc94bb-a5d1-431c-9847-2f6a02997e25\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: [nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: [nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://20393151c13ddc03e23c32092c70e09c99527d0dd7554c02fc9d2ef8f6e38ba4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://877601ea3a274df873ed3165ae9f9e407093527a55ca10bb9a1c6092312c1b58\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ecaafcef7f92f85be04341712725da108f6dc00c79d4fb72f683186ccaa7c7d7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\
\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f2e1c170bdf63c83df5049fbe46ef4de6e08838e961ed3c16904cb9572fdf52f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4dd9889bb8600e958757f70ba92e2e7742f9f4e8ce9bf9c11dc4a2fd4ba0aaa2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f647a7091a4d0acbf116e2583851b13ec4415004447202e892f2b24b7acf5e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\
"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ffc0bf6be79f2bd37f0c4d453c60991c96473d765cdb8012051321114120f194\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recurs
iveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://19bceeb84e2cce918b75507fb0f9d5e4365fcd0d30da4491b83ed54b3c2d369a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fde011e559f073f261a07a3e55c05d88d1b11a16b6858b74cb8ef79b97c266b1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fde011e559f073f261a07a3e55c05d88d1b11a16b6858b74cb8ef79b97c266b1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:00Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-z69nr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:08Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:08 crc kubenswrapper[4926]: I1122 10:40:08.056647 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://136423f9eeb872be0b685dcfee2a81e5f4133ad09607dc53fdfa64d70e87ceab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:08Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:08 crc kubenswrapper[4926]: I1122 10:40:08.075198 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-c6w2q" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"36de2843-6491-4c54-b624-c4a3d328c164\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://13dff3bd18b5adc45fb92f8207a6726011752f430271dabbae9f2e94011f1b08\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-52vzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:00Z\\\"}}\" for pod \"openshift-multus\"/\"multus-c6w2q\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:08Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:08 crc kubenswrapper[4926]: I1122 10:40:08.087164 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"19e2ae79-41cf-4653-aa92-cc410145852b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5b4679473c2c11fecbd8496e1cbeb15b3a2e3fff5e5b5f127e0d1f1d7b3e5b70\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8066b411b165bb6f3aa9fac7c4f7d5687e58fba56ff2afd14c7a351ba3877483\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://96a716433f83f8041fc5f29383937a1b49d73701286739632a5c07a7e4a42077\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"st
arted\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ff433d4efb9f77a47f79a39aa609b09a40a642f554e19b8fce0c84ad96a5e34\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:39:40Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:08Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:08 crc kubenswrapper[4926]: I1122 10:40:08.097876 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-wqf9b" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"385427b5-fb45-42dd-8ff8-a4c7cdad6157\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://06207f52a9095cf1830a6ad91952c7efa774aa15a2fca873dc09b6b995e26401\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4rrk8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:00Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-wqf9b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:08Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:08 crc kubenswrapper[4926]: I1122 10:40:08.108500 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2bff8ee1644ff7cd9bee279e5c641b5d9c55e0546fbc7d1f2330e4e86aee0c37\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:08Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:08 crc kubenswrapper[4926]: I1122 10:40:08.116005 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:08 crc kubenswrapper[4926]: I1122 10:40:08.116041 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:08 crc kubenswrapper[4926]: I1122 10:40:08.116050 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:08 crc kubenswrapper[4926]: I1122 10:40:08.116064 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:08 crc kubenswrapper[4926]: I1122 10:40:08.116073 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:08Z","lastTransitionTime":"2025-11-22T10:40:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:40:08 crc kubenswrapper[4926]: I1122 10:40:08.118447 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:08Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:08 crc kubenswrapper[4926]: I1122 10:40:08.128117 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:08Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:08 crc kubenswrapper[4926]: I1122 10:40:08.169194 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-4sppr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f66b576d-83a6-4919-a918-7b075f35881e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a4a164ad2484a9c0b09c8e6b174448dedfe84a017ada69b19429d90621540080\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jwtrf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\
\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:01Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-4sppr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:08Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:08 crc kubenswrapper[4926]: I1122 10:40:08.197085 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 22 10:40:08 crc kubenswrapper[4926]: I1122 10:40:08.197208 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 10:40:08 crc kubenswrapper[4926]: I1122 10:40:08.197240 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 10:40:08 crc kubenswrapper[4926]: E1122 10:40:08.197337 4926 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 22 10:40:08 crc kubenswrapper[4926]: E1122 10:40:08.197373 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-22 10:40:16.197361306 +0000 UTC m=+36.498966593 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 22 10:40:08 crc kubenswrapper[4926]: E1122 10:40:08.197605 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 10:40:16.197595932 +0000 UTC m=+36.499201219 (durationBeforeRetry 8s). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 10:40:08 crc kubenswrapper[4926]: E1122 10:40:08.197641 4926 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 22 10:40:08 crc kubenswrapper[4926]: E1122 10:40:08.197663 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-22 10:40:16.197656803 +0000 UTC m=+36.499262090 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 22 10:40:08 crc kubenswrapper[4926]: I1122 10:40:08.212087 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8bc26a84-5939-4708-97f7-36fb5b1d1f27\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:40Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:40Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bdc617bc3d316106b3e66df8521ae9165a90df081b92c4a6c167895006a667a4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8bdb9d01c359b36d5ea1517197ab1db0746267b371b5aaf6ae9af7157af6f76\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f92968d6a164ebcee515ead5d8704ab04430b65f29cec21c10cecf2b99079737\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e4845e8b99f2def354e4a055d407495a578db4994aef6fb1c70b91760bbf4e2a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e4845e8b99f2def354e4a055d407495a578db4994aef6fb1c70b91760bbf4e2a\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"message\\\":\\\"le observer\\\\nW1122 10:40:01.221686 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1122 10:40:01.221835 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1122 10:40:01.223195 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3742639324/tls.crt::/tmp/serving-cert-3742639324/tls.key\\\\\\\"\\\\nI1122 10:40:01.845350 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1122 10:40:01.848213 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1122 10:40:01.848231 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1122 10:40:01.848251 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1122 10:40:01.848258 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1122 10:40:01.855383 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1122 10:40:01.855412 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1122 10:40:01.855418 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1122 10:40:01.855423 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nI1122 10:40:01.855441 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1122 10:40:01.855456 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1122 10:40:01.855461 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1122 10:40:01.855464 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1122 10:40:01.857125 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T10:39:55Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://59001d5969924d417da69f7b385e8ee904bb5289914c6f4a58156a80f2553abd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:43Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e4fdad471fe7d7332fa8c1fc1e4779b934b140f9c808e0af44f05808c56b3e42\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e4fdad471fe7d7332fa8c1fc1e4779b934b140f9c808e0af44f05808c56b3e42\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:39:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:39:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:39:40Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:08Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:08 crc kubenswrapper[4926]: I1122 10:40:08.218729 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:08 crc kubenswrapper[4926]: I1122 10:40:08.218754 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:08 crc kubenswrapper[4926]: I1122 10:40:08.218764 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:08 crc kubenswrapper[4926]: I1122 10:40:08.218779 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:08 crc kubenswrapper[4926]: I1122 10:40:08.218789 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:08Z","lastTransitionTime":"2025-11-22T10:40:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:08 crc kubenswrapper[4926]: I1122 10:40:08.257977 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:08Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:08 crc kubenswrapper[4926]: I1122 10:40:08.289387 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d4977b14-85c3-4141-9b15-1768f09e8d27\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7f4461b3d09c647d71476b2c089ab47e93fe0e0efb27f8179e476ff3667447f6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5nwn7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fb54559825bae40317f4dbc23c5b4238c4681f9845053656d59b2d2b3e504e1a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5nwn7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:00Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-xr9nd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:08Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:08 crc kubenswrapper[4926]: I1122 10:40:08.298331 4926 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 22 10:40:08 crc kubenswrapper[4926]: I1122 10:40:08.298375 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 22 10:40:08 crc kubenswrapper[4926]: E1122 10:40:08.298493 4926 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 22 10:40:08 crc kubenswrapper[4926]: E1122 10:40:08.298509 4926 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 22 10:40:08 crc kubenswrapper[4926]: E1122 10:40:08.298519 4926 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 22 10:40:08 crc kubenswrapper[4926]: E1122 10:40:08.298568 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-22 10:40:16.298552034 +0000 UTC m=+36.600157321 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 22 10:40:08 crc kubenswrapper[4926]: E1122 10:40:08.298572 4926 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 22 10:40:08 crc kubenswrapper[4926]: E1122 10:40:08.298607 4926 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 22 10:40:08 crc kubenswrapper[4926]: E1122 10:40:08.298621 4926 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 22 10:40:08 crc kubenswrapper[4926]: E1122 10:40:08.298674 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. 
No retries permitted until 2025-11-22 10:40:16.298658386 +0000 UTC m=+36.600263673 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 22 10:40:08 crc kubenswrapper[4926]: I1122 10:40:08.320825 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:08 crc kubenswrapper[4926]: I1122 10:40:08.320864 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:08 crc kubenswrapper[4926]: I1122 10:40:08.320876 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:08 crc kubenswrapper[4926]: I1122 10:40:08.320918 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:08 crc kubenswrapper[4926]: I1122 10:40:08.320930 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:08Z","lastTransitionTime":"2025-11-22T10:40:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:08 crc kubenswrapper[4926]: I1122 10:40:08.329447 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-sr572" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fa95257d-7464-4038-b2f3-aa795e4ac425\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0a7d49a66e96b6f535cae7cd657acf64164c67b52df055b133183ef25644863d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0a7d49a66e96b6f535cae7cd657acf64164c67b52df055b133183ef25644863d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://53833aaa04a836cea1e6539fd089b91d92bec1e9c1037d036a09fa83a95b45da\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://53833aaa04a836cea1e6539fd089b91d92bec1e9c1037d036a09fa83a95b45da\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://80eeb7fb332eda9b9472092de548160297c4abdf76a929904fcf3321e992c230\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://80eeb7fb332eda9b9472092de548160297c4abdf76a929904fcf3321e992c230\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:40:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6293e4f193241ddbe2b4e93ceee670330e9e6cf86889fc9b049a7195da496a28\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6293e4f193241ddbe2b4e93ceee670330e9e6cf86889fc9b049a7195da496a28\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:40:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6ffd06bbdeee7c23dde48a03a975b5a86b258d8f95281abd78c8ce459155bb3f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6ffd06bbdeee7c23dde48a03a975b5a86b258d8f95281abd78c8ce459155bb3f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:40:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",
\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ff5dbca8ebeaee6449c5eadf548e32151cc59875e405a574c1f2e0677b40bb19\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ff5dbca8ebeaee6449c5eadf548e32151cc59875e405a574c1f2e0677b40bb19\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:40:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:00Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-sr572\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:08Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:08 crc kubenswrapper[4926]: I1122 10:40:08.378633 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1f3bdd4c-e5c5-46f1-8f92-450d892ef2e2\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b68f6c08d1ac65e51054d798dcb1e6affbf5f114ea3848b544d3093168e44150\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://adceb29cfdba4f70773eb38f81b2291080a8a25e6d6ffe7fbbb037118e0ac29f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://29acb771444281287208f7c5aecfe78ed6e70fe2940be31d423c011878bee6c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c37c77ac89f0469ff68b3d5dffc2af7d7b6997b
8975abe244e302baf0f5bbacc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ed65ad0463b5aa72d13756891af20691bff705a94c67a55d45f71af2a88b01e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2f4c96d7281eda14ad3971ee27f9f0f401d12ba8b72e32b65bd9d4935182f980\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2f4c96d7281eda14ad3971ee27f9f0f401d12ba8b72e32b65bd9d4935182f980\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:39:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:39:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ebad6d17a869f5f58a7c43300ceace96025b58ca9621cb6bc23b45d69fe471b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ebad6d17a869f5f58a7c43300ceace96025b58ca9621cb6bc23b45d69fe471b4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:39:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:39:42Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://6075bb148352742b787b1e8df58fe76d06db89303a7c297d07446148532e92cb\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6075bb148352742b787b1e8df58fe76d06db89303a7c297d07446148532e92cb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:39:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:39:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:39:40Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:08Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:08 crc kubenswrapper[4926]: I1122 10:40:08.414381 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://20ee112074738980326d231b1680aa74c920ec797dbfe4752ebfdfb99695caeb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0a5e6c7d3f4dd4bbfdc58c50672c5187d32b136fb9e096c95e786193e1c773b9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36
cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:08Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:08 crc kubenswrapper[4926]: I1122 10:40:08.423790 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:08 crc kubenswrapper[4926]: I1122 10:40:08.423838 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:08 crc kubenswrapper[4926]: I1122 10:40:08.423850 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:08 crc kubenswrapper[4926]: I1122 10:40:08.423869 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:08 crc kubenswrapper[4926]: I1122 10:40:08.423886 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:08Z","lastTransitionTime":"2025-11-22T10:40:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:40:08 crc kubenswrapper[4926]: I1122 10:40:08.461460 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-z69nr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"25bc94bb-a5d1-431c-9847-2f6a02997e25\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://20393151c13ddc03e23c32092c70e09c99527d0dd7554c02fc9d2ef8f6e38ba4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://877601ea3a274df873ed3165ae9f9e407093527a55ca10bb9a1c6092312c1b58\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\
":\\\"cri-o://ecaafcef7f92f85be04341712725da108f6dc00c79d4fb72f683186ccaa7c7d7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f2e1c170bdf63c83df5049fbe46ef4de6e08838e961ed3c16904cb9572fdf52f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4dd9889bb8600e958757f70ba92e2e7742f9f4e8ce9bf9c11dc4a2fd4ba0aaa2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f647a7091a4d0acbf116e2583851b13ec4415004447202e892f2b24b7acf5e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.i
o/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ffc0bf6be79f2bd37f0c4d453c60991c96473d765cdb8012051321114120f194\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\
"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://19bceeb84e2cce918b75507fb0f9d5e4365fcd0d30da4491b83ed54b3c2d369a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fde011e559f073f261a07a3e55c05d88d1b11a16b6858b74cb8ef79b97c266b1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fde011e559f073f261a07a3e55c05d88d1b11a16b6858b74cb8ef79b97c266b1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:00Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-z69nr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:08Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:08 crc kubenswrapper[4926]: I1122 10:40:08.526527 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:08 crc kubenswrapper[4926]: I1122 10:40:08.526557 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:08 crc kubenswrapper[4926]: I1122 10:40:08.526566 
4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:08 crc kubenswrapper[4926]: I1122 10:40:08.526577 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:08 crc kubenswrapper[4926]: I1122 10:40:08.526586 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:08Z","lastTransitionTime":"2025-11-22T10:40:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:08 crc kubenswrapper[4926]: I1122 10:40:08.581383 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 22 10:40:08 crc kubenswrapper[4926]: E1122 10:40:08.581567 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 22 10:40:08 crc kubenswrapper[4926]: I1122 10:40:08.581970 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 10:40:08 crc kubenswrapper[4926]: E1122 10:40:08.582198 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 22 10:40:08 crc kubenswrapper[4926]: I1122 10:40:08.629507 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:08 crc kubenswrapper[4926]: I1122 10:40:08.629551 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:08 crc kubenswrapper[4926]: I1122 10:40:08.629564 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:08 crc kubenswrapper[4926]: I1122 10:40:08.629581 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:08 crc kubenswrapper[4926]: I1122 10:40:08.629593 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:08Z","lastTransitionTime":"2025-11-22T10:40:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:40:08 crc kubenswrapper[4926]: I1122 10:40:08.731754 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:08 crc kubenswrapper[4926]: I1122 10:40:08.731796 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:08 crc kubenswrapper[4926]: I1122 10:40:08.731807 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:08 crc kubenswrapper[4926]: I1122 10:40:08.731822 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:08 crc kubenswrapper[4926]: I1122 10:40:08.731832 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:08Z","lastTransitionTime":"2025-11-22T10:40:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:08 crc kubenswrapper[4926]: I1122 10:40:08.825361 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-sr572" event={"ID":"fa95257d-7464-4038-b2f3-aa795e4ac425","Type":"ContainerStarted","Data":"b2dd6b2a2fe640f28f973df40cf7a31e3cba962a0d2aec796c6fcfa3892adce1"} Nov 22 10:40:08 crc kubenswrapper[4926]: I1122 10:40:08.834886 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:08 crc kubenswrapper[4926]: I1122 10:40:08.834954 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:08 crc kubenswrapper[4926]: I1122 10:40:08.834965 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:08 crc kubenswrapper[4926]: I1122 10:40:08.834983 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:08 crc kubenswrapper[4926]: I1122 10:40:08.834995 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:08Z","lastTransitionTime":"2025-11-22T10:40:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:40:08 crc kubenswrapper[4926]: I1122 10:40:08.849793 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1f3bdd4c-e5c5-46f1-8f92-450d892ef2e2\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b68f6c08d1ac65e51054d798dcb1e6affbf5f114ea3848b544d3093168e44150\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://adceb29cfdba4f70773eb38f81b2291080a8a25e6d6ffe7fbbb037118e0ac29f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://29acb771444281287208f7c5aecfe78ed6e70fe2940be31d423c011878bee6c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\
":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c37c77ac89f0469ff68b3d5dffc2af7d7b6997b8975abe244e302baf0f5bbacc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ed65ad0463b5aa72d13756891af20691bff705a94c67a55d45f71af2a88b01e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2f4c96d7281eda14ad3971ee27f9f0f401d12ba8b72e32b65bd9d4935182f980\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2f4c96d7281eda14ad3971ee27f9f0f401d12ba8b72e32b65bd9d4935182f980\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:39:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:39:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ebad6d17a869f5f58a7c43300ceace96025b58ca9621cb6bc23b45d69fe471b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ebad6d17a869f5f58a7c43300ceace96025b58ca9621cb6bc23b45d69fe471b4\\\",\\\"exitCode\\\":0,\\\"finished
At\\\":\\\"2025-11-22T10:39:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:39:42Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://6075bb148352742b787b1e8df58fe76d06db89303a7c297d07446148532e92cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6075bb148352742b787b1e8df58fe76d06db89303a7c297d07446148532e92cb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:39:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:39:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:39:40Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:08Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:08 crc kubenswrapper[4926]: I1122 10:40:08.865625 4926 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 22 10:40:08 crc kubenswrapper[4926]: I1122 10:40:08.866247 4926 scope.go:117] "RemoveContainer" containerID="e4845e8b99f2def354e4a055d407495a578db4994aef6fb1c70b91760bbf4e2a" Nov 22 10:40:08 crc kubenswrapper[4926]: E1122 10:40:08.866386 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver-check-endpoints\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\"" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" Nov 22 10:40:08 crc kubenswrapper[4926]: I1122 10:40:08.869053 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://20ee112074738980326d231b1680aa74c920ec797dbfe4752ebfdfb99695caeb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0a5e6c7d3f4dd4bbfdc58c50672c5187d32b136fb9e096c95e786193e1c773b9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:08Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:08 crc kubenswrapper[4926]: I1122 10:40:08.892314 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-z69nr" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"25bc94bb-a5d1-431c-9847-2f6a02997e25\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://20393151c13ddc03e23c32092c70e09c99527d0dd7554c02fc9d2ef8f6e38ba4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://877601ea3a274df873ed3165ae9f9e407093527a55ca10bb9a1c6092312c1b58\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ecaafcef7f92f85be04341712725da108f6dc00c79d4fb72f683186ccaa7c7d7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f2e1c170bdf63c83df5049fbe46ef4de6e08838e961ed3c16904cb9572fdf52f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4dd9889bb8600e958757f70ba92e2e7742f9f4e8ce9bf9c11dc4a2fd4ba0aaa2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f647a7091a4d0acbf116e2583851b13ec4415004447202e892f2b24b7acf5e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ffc0bf6be79f2bd37f0c4d453c60991c96473d765cdb8012051321114120f194\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"D
isabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://19bceeb84e2cce918b75507fb0f9d5e4365fcd0d30da4491b83ed54b3c2d369a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fde011e559f073f261a07a3e55c05d88d1b11a16b6858b74cb8ef79b97c266b1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fde011e559f073f261a07a3e55c05d88d1b11a16b6858b74cb8ef79b97c266b1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:00Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-z69nr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:08Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:08 crc kubenswrapper[4926]: I1122 10:40:08.907141 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://136423f9eeb872be0b685dcfee2a81e5f4133ad09607dc53fdfa64d70e87ceab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:08Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:08 crc kubenswrapper[4926]: I1122 10:40:08.917600 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-c6w2q" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"36de2843-6491-4c54-b624-c4a3d328c164\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://13dff3bd18b5adc45fb92f8207a6726011752f430271dabbae9f2e94011f1b08\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-52vzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:00Z\\\"}}\" for pod \"openshift-multus\"/\"multus-c6w2q\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:08Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:08 crc kubenswrapper[4926]: I1122 10:40:08.928310 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"19e2ae79-41cf-4653-aa92-cc410145852b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5b4679473c2c11fecbd8496e1cbeb15b3a2e3fff5e5b5f127e0d1f1d7b3e5b70\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8066b411b165bb6f3aa9fac7c4f7d5687e58fba56ff2afd14c7a351ba3877483\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://96a716433f83f8041fc5f29383937a1b49d73701286739632a5c07a7e4a42077\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"st
arted\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ff433d4efb9f77a47f79a39aa609b09a40a642f554e19b8fce0c84ad96a5e34\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:39:40Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:08Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:08 crc kubenswrapper[4926]: I1122 10:40:08.937385 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-wqf9b" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"385427b5-fb45-42dd-8ff8-a4c7cdad6157\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://06207f52a9095cf1830a6ad91952c7efa774aa15a2fca873dc09b6b995e26401\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4rrk8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:00Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-wqf9b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:08Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:08 crc kubenswrapper[4926]: I1122 10:40:08.937580 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:08 crc kubenswrapper[4926]: I1122 10:40:08.937599 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:08 crc kubenswrapper[4926]: I1122 10:40:08.937608 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:08 crc kubenswrapper[4926]: I1122 10:40:08.937623 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:08 crc kubenswrapper[4926]: I1122 10:40:08.937633 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:08Z","lastTransitionTime":"2025-11-22T10:40:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: 
no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:08 crc kubenswrapper[4926]: I1122 10:40:08.950128 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2bff8ee1644ff7cd9bee279e5c641b5d9c55e0546fbc7d1f2330e4e86aee0c37\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:08Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:08 crc kubenswrapper[4926]: I1122 10:40:08.967832 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:08Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:08 crc kubenswrapper[4926]: I1122 10:40:08.980638 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:08Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:08 crc kubenswrapper[4926]: I1122 10:40:08.994104 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-4sppr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f66b576d-83a6-4919-a918-7b075f35881e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a4a164ad2484a9c0b09c8e6b174448dedfe84a017ada69b19429d90621540080\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jwtrf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:01Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-4sppr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify 
certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:08Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:09 crc kubenswrapper[4926]: I1122 10:40:09.006679 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8bc26a84-5939-4708-97f7-36fb5b1d1f27\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:40Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:40Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bdc617bc3d316106b3e66df8521ae9165a90df081b92c4a6c167895006a667a4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8bdb9d01c359b36d5ea1517197ab1db0746267b371b5aaf6ae9af7157af6f76\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f92968d6a164ebcee515ead5d8704ab04430b65f29cec21c10cecf2b99079737\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"last
State\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e4845e8b99f2def354e4a055d407495a578db4994aef6fb1c70b91760bbf4e2a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e4845e8b99f2def354e4a055d407495a578db4994aef6fb1c70b91760bbf4e2a\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"message\\\":\\\"le observer\\\\nW1122 10:40:01.221686 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1122 10:40:01.221835 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1122 10:40:01.223195 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3742639324/tls.crt::/tmp/serving-cert-3742639324/tls.key\\\\\\\"\\\\nI1122 10:40:01.845350 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1122 10:40:01.848213 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1122 10:40:01.848231 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1122 10:40:01.848251 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1122 10:40:01.848258 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1122 10:40:01.855383 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1122 10:40:01.855412 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1122 10:40:01.855418 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1122 10:40:01.855423 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nI1122 10:40:01.855441 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1122 10:40:01.855456 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1122 10:40:01.855461 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1122 10:40:01.855464 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1122 10:40:01.857125 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T10:39:55Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://59001d5969924d417da69f7b385e8ee904bb5289914c6f4a58156a80f2553abd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:43Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e4fdad471fe7d7332fa8c1fc1e4779b934b140f9c808e0af44f05808c56b3e42\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e4fdad471fe7d7332fa8c1fc1e4779b934b140f9c808e0af44f05808c56b3e42\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:39:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:39:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:39:40Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:09Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:09 crc kubenswrapper[4926]: I1122 10:40:09.018158 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:09Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:09 crc kubenswrapper[4926]: I1122 10:40:09.030470 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d4977b14-85c3-4141-9b15-1768f09e8d27\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7f4461b3d09c647d71476b2c089ab47e93fe0e0efb27f8179e476ff3667447f6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5nwn7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fb54559825bae40317f4dbc23c5b4238c4681f9845053656d59b2d2b3e504e1a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5nwn7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:00Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-xr9nd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:09Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:09 crc kubenswrapper[4926]: I1122 10:40:09.039949 4926 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:09 crc kubenswrapper[4926]: I1122 10:40:09.040051 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:09 crc kubenswrapper[4926]: I1122 10:40:09.040070 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:09 crc kubenswrapper[4926]: I1122 10:40:09.040128 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:09 crc kubenswrapper[4926]: I1122 10:40:09.040148 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:09Z","lastTransitionTime":"2025-11-22T10:40:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:09 crc kubenswrapper[4926]: I1122 10:40:09.051983 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-sr572" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fa95257d-7464-4038-b2f3-aa795e4ac425\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b2dd6b2a2fe640f28f973df40cf7a31e3cba962a0d2aec796c6fcfa3892adce1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0a7d49a66e96b6f535cae7cd657acf64164c67b52df055b133183ef25644863d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2
c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0a7d49a66e96b6f535cae7cd657acf64164c67b52df055b133183ef25644863d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://53833aaa04a836cea1e6539fd089b91d92bec1e9c1037d036a09fa83a95b45da\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://53833aaa04a836cea1e6539fd089b91d92bec1e9c1037d036a09fa83a95b45da\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://80eeb7fb332eda9b9472092de548160297c4abdf76a929904fcf3321e992c230\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://80eeb7fb332eda9b9472092de548160297c4abdf76a929904fcf3321e992c230\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:40:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/
secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6293e4f193241ddbe2b4e93ceee670330e9e6cf86889fc9b049a7195da496a28\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6293e4f193241ddbe2b4e93ceee670330e9e6cf86889fc9b049a7195da496a28\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:40:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6ffd06bbdeee7c23dde48a03a975b5a86b258d8f95281abd78c8ce459155bb3f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6ffd06bbdeee7c23dde48a03a975b5a86b258d8f95281abd78c8ce459155bb3f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:40:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ff5dbca8ebeaee6449c5eadf548e32151cc59875e405a574c1f2e0677b40bb19\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ff5dbca8ebeaee6449c5eadf548e32151cc59875e405a574c1f2e0677b40bb19\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:40:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:07Z\\\"}},\\\"volu
meMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:00Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-sr572\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:09Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:09 crc kubenswrapper[4926]: I1122 10:40:09.142708 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:09 crc kubenswrapper[4926]: I1122 10:40:09.142812 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:09 crc kubenswrapper[4926]: I1122 10:40:09.142871 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:09 crc kubenswrapper[4926]: I1122 10:40:09.142947 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:09 crc kubenswrapper[4926]: I1122 10:40:09.142974 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:09Z","lastTransitionTime":"2025-11-22T10:40:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:09 crc kubenswrapper[4926]: I1122 10:40:09.246568 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:09 crc kubenswrapper[4926]: I1122 10:40:09.246639 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:09 crc kubenswrapper[4926]: I1122 10:40:09.246661 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:09 crc kubenswrapper[4926]: I1122 10:40:09.246689 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:09 crc kubenswrapper[4926]: I1122 10:40:09.246711 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:09Z","lastTransitionTime":"2025-11-22T10:40:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:40:09 crc kubenswrapper[4926]: I1122 10:40:09.351243 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:09 crc kubenswrapper[4926]: I1122 10:40:09.351298 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:09 crc kubenswrapper[4926]: I1122 10:40:09.351321 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:09 crc kubenswrapper[4926]: I1122 10:40:09.351341 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:09 crc kubenswrapper[4926]: I1122 10:40:09.351366 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:09Z","lastTransitionTime":"2025-11-22T10:40:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:09 crc kubenswrapper[4926]: I1122 10:40:09.455009 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:09 crc kubenswrapper[4926]: I1122 10:40:09.455065 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:09 crc kubenswrapper[4926]: I1122 10:40:09.455077 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:09 crc kubenswrapper[4926]: I1122 10:40:09.455096 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:09 crc kubenswrapper[4926]: I1122 10:40:09.455108 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:09Z","lastTransitionTime":"2025-11-22T10:40:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:09 crc kubenswrapper[4926]: I1122 10:40:09.559326 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:09 crc kubenswrapper[4926]: I1122 10:40:09.559387 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:09 crc kubenswrapper[4926]: I1122 10:40:09.559398 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:09 crc kubenswrapper[4926]: I1122 10:40:09.559416 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:09 crc kubenswrapper[4926]: I1122 10:40:09.559429 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:09Z","lastTransitionTime":"2025-11-22T10:40:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:40:09 crc kubenswrapper[4926]: I1122 10:40:09.581667 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 22 10:40:09 crc kubenswrapper[4926]: E1122 10:40:09.581821 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 22 10:40:09 crc kubenswrapper[4926]: I1122 10:40:09.661882 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:09 crc kubenswrapper[4926]: I1122 10:40:09.661953 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:09 crc kubenswrapper[4926]: I1122 10:40:09.661964 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:09 crc kubenswrapper[4926]: I1122 10:40:09.661981 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:09 crc kubenswrapper[4926]: I1122 10:40:09.661993 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:09Z","lastTransitionTime":"2025-11-22T10:40:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:09 crc kubenswrapper[4926]: I1122 10:40:09.765260 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:09 crc kubenswrapper[4926]: I1122 10:40:09.765318 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:09 crc kubenswrapper[4926]: I1122 10:40:09.765328 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:09 crc kubenswrapper[4926]: I1122 10:40:09.765352 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:09 crc kubenswrapper[4926]: I1122 10:40:09.765367 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:09Z","lastTransitionTime":"2025-11-22T10:40:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:40:09 crc kubenswrapper[4926]: I1122 10:40:09.868492 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:09 crc kubenswrapper[4926]: I1122 10:40:09.868546 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:09 crc kubenswrapper[4926]: I1122 10:40:09.868557 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:09 crc kubenswrapper[4926]: I1122 10:40:09.868574 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:09 crc kubenswrapper[4926]: I1122 10:40:09.868586 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:09Z","lastTransitionTime":"2025-11-22T10:40:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:09 crc kubenswrapper[4926]: I1122 10:40:09.971680 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:09 crc kubenswrapper[4926]: I1122 10:40:09.971746 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:09 crc kubenswrapper[4926]: I1122 10:40:09.971762 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:09 crc kubenswrapper[4926]: I1122 10:40:09.971782 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:09 crc kubenswrapper[4926]: I1122 10:40:09.971795 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:09Z","lastTransitionTime":"2025-11-22T10:40:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:10 crc kubenswrapper[4926]: I1122 10:40:10.075347 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:10 crc kubenswrapper[4926]: I1122 10:40:10.075421 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:10 crc kubenswrapper[4926]: I1122 10:40:10.075442 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:10 crc kubenswrapper[4926]: I1122 10:40:10.075470 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:10 crc kubenswrapper[4926]: I1122 10:40:10.075487 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:10Z","lastTransitionTime":"2025-11-22T10:40:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:40:10 crc kubenswrapper[4926]: I1122 10:40:10.178719 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:10 crc kubenswrapper[4926]: I1122 10:40:10.178782 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:10 crc kubenswrapper[4926]: I1122 10:40:10.178800 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:10 crc kubenswrapper[4926]: I1122 10:40:10.178824 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:10 crc kubenswrapper[4926]: I1122 10:40:10.178843 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:10Z","lastTransitionTime":"2025-11-22T10:40:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:10 crc kubenswrapper[4926]: I1122 10:40:10.220430 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:10 crc kubenswrapper[4926]: I1122 10:40:10.220460 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:10 crc kubenswrapper[4926]: I1122 10:40:10.220476 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:10 crc kubenswrapper[4926]: I1122 10:40:10.220490 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:10 crc kubenswrapper[4926]: I1122 10:40:10.220499 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:10Z","lastTransitionTime":"2025-11-22T10:40:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:40:10 crc kubenswrapper[4926]: E1122 10:40:10.232735 4926 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404548Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865348Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:40:10Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:10Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:40:10Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:10Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:40:10Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:10Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:40:10Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:10Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"aeb560ef-1eb5-4732-98e3-250b723cbd1b\\\",\\\"systemUUID\\\":\\\"16dcb71b-1e1f-4d77-bb74-17aa213c9052\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:10Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:10 crc kubenswrapper[4926]: I1122 10:40:10.235791 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:10 crc kubenswrapper[4926]: I1122 10:40:10.235821 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 22 10:40:10 crc kubenswrapper[4926]: I1122 10:40:10.235834 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:10 crc kubenswrapper[4926]: I1122 10:40:10.235850 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:10 crc kubenswrapper[4926]: I1122 10:40:10.235862 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:10Z","lastTransitionTime":"2025-11-22T10:40:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:10 crc kubenswrapper[4926]: E1122 10:40:10.250609 4926 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404548Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865348Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:40:10Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:10Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:40:10Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:10Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:40:10Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:10Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:40:10Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:10Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
[... retry payload elided: byte-identical to the 10:40:10.232735 attempt above (same conditions, image list, and nodeInfo) ...]\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:10Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:10 crc kubenswrapper[4926]: I1122 10:40:10.254309 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:10 crc kubenswrapper[4926]: I1122 10:40:10.254342 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 22 10:40:10 crc kubenswrapper[4926]: I1122 10:40:10.254354 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:10 crc kubenswrapper[4926]: I1122 10:40:10.254371 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:10 crc kubenswrapper[4926]: I1122 10:40:10.254382 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:10Z","lastTransitionTime":"2025-11-22T10:40:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:10 crc kubenswrapper[4926]: E1122 10:40:10.265638 4926 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404548Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865348Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:40:10Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:10Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:40:10Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:10Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:40:10Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:10Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:40:10Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:10Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
[... retry payload elided: byte-identical to the 10:40:10.232735 attempt above (same conditions, image list, and nodeInfo) ...]\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:10Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:10 crc kubenswrapper[4926]: I1122 10:40:10.268946 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:10 crc kubenswrapper[4926]: I1122 10:40:10.268977 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 22 10:40:10 crc kubenswrapper[4926]: I1122 10:40:10.269005 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:10 crc kubenswrapper[4926]: I1122 10:40:10.269020 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:10 crc kubenswrapper[4926]: I1122 10:40:10.269029 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:10Z","lastTransitionTime":"2025-11-22T10:40:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:10 crc kubenswrapper[4926]: E1122 10:40:10.280320 4926 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404548Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865348Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:40:10Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:10Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:40:10Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:10Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:40:10Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:10Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:40:10Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:10Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"aeb560ef-1eb5-4732-98e3-250b723cbd1b\\\",\\\"systemUUID\\\":\\\"16dcb71b-1e1f-4d77-bb74-17aa213c9052\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:10Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:10 crc kubenswrapper[4926]: I1122 10:40:10.283123 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:10 crc kubenswrapper[4926]: I1122 10:40:10.283152 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 22 10:40:10 crc kubenswrapper[4926]: I1122 10:40:10.283160 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:10 crc kubenswrapper[4926]: I1122 10:40:10.283172 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:10 crc kubenswrapper[4926]: I1122 10:40:10.283183 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:10Z","lastTransitionTime":"2025-11-22T10:40:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:10 crc kubenswrapper[4926]: E1122 10:40:10.294181 4926 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404548Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865348Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:40:10Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:10Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:40:10Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:10Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:40:10Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:10Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:40:10Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:10Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"aeb560ef-1eb5-4732-98e3-250b723cbd1b\\\",\\\"systemUUID\\\":\\\"16dcb71b-1e1f-4d77-bb74-17aa213c9052\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:10Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:10 crc kubenswrapper[4926]: E1122 10:40:10.294376 4926 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 22 10:40:10 crc kubenswrapper[4926]: I1122 10:40:10.295775 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Nov 22 10:40:10 crc kubenswrapper[4926]: I1122 10:40:10.295808 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:10 crc kubenswrapper[4926]: I1122 10:40:10.295819 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:10 crc kubenswrapper[4926]: I1122 10:40:10.295833 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:10 crc kubenswrapper[4926]: I1122 10:40:10.295845 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:10Z","lastTransitionTime":"2025-11-22T10:40:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:10 crc kubenswrapper[4926]: I1122 10:40:10.398555 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:10 crc kubenswrapper[4926]: I1122 10:40:10.398619 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:10 crc kubenswrapper[4926]: I1122 10:40:10.398633 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:10 crc kubenswrapper[4926]: I1122 10:40:10.398650 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:10 crc kubenswrapper[4926]: I1122 10:40:10.398664 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:10Z","lastTransitionTime":"2025-11-22T10:40:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:10 crc kubenswrapper[4926]: I1122 10:40:10.501273 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:10 crc kubenswrapper[4926]: I1122 10:40:10.501482 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:10 crc kubenswrapper[4926]: I1122 10:40:10.501572 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:10 crc kubenswrapper[4926]: I1122 10:40:10.501633 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:10 crc kubenswrapper[4926]: I1122 10:40:10.501686 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:10Z","lastTransitionTime":"2025-11-22T10:40:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:10 crc kubenswrapper[4926]: I1122 10:40:10.581492 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 10:40:10 crc kubenswrapper[4926]: E1122 10:40:10.581882 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 22 10:40:10 crc kubenswrapper[4926]: I1122 10:40:10.581442 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 22 10:40:10 crc kubenswrapper[4926]: E1122 10:40:10.582170 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 22 10:40:10 crc kubenswrapper[4926]: I1122 10:40:10.604827 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:10 crc kubenswrapper[4926]: I1122 10:40:10.604939 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:10 crc kubenswrapper[4926]: I1122 10:40:10.604959 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:10 crc kubenswrapper[4926]: I1122 10:40:10.605013 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:10 crc kubenswrapper[4926]: I1122 10:40:10.605031 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:10Z","lastTransitionTime":"2025-11-22T10:40:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:40:10 crc kubenswrapper[4926]: I1122 10:40:10.605050 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-sr572" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fa95257d-7464-4038-b2f3-aa795e4ac425\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b2dd6b2a2fe640f28f973df40cf7a31e3cba962a0d2aec796c6fcfa3892adce1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0a7d49a66e96b6f535cae7cd657acf64164c67b52df055b133183ef25644863d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0a7d49a66e96b6f535cae7cd657acf64164c67b52df055b133183ef25644863d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://53833aaa04a836cea1e6539fd089b91d92bec1e9c1037d036a09fa83a95b45da\
\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://53833aaa04a836cea1e6539fd089b91d92bec1e9c1037d036a09fa83a95b45da\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://80eeb7fb332eda9b9472092de548160297c4abdf76a929904fcf3321e992c230\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://80eeb7fb332eda9b9472092de548160297c4abdf76a929904fcf3321e992c230\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:40:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6293e4f193241ddbe2b4e93ceee670330e9e6cf86889fc9b049a7195da496a28\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6293e4f193241ddbe2b4e93ceee670330e9e6cf86889fc9b049a7195da496a28\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:40:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"
mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6ffd06bbdeee7c23dde48a03a975b5a86b258d8f95281abd78c8ce459155bb3f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6ffd06bbdeee7c23dde48a03a975b5a86b258d8f95281abd78c8ce459155bb3f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:40:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ff5dbca8ebeaee6449c5eadf548e32151cc59875e405a574c1f2e0677b40bb19\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ff5dbca8ebeaee6449c5eadf548e32151cc59875e405a574c1f2e0677b40bb19\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:40:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:00Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-sr572\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:10Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:10 crc kubenswrapper[4926]: I1122 10:40:10.624727 4926 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8bc26a84-5939-4708-97f7-36fb5b1d1f27\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:40Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:40Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bdc617bc3d316106b3e66df8521ae9165a90df081b92c4a6c167895006a667a4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8bdb9d01c359b36d5ea1517197ab1db0746267b371b5aaf6ae9af7157af6f76\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f92968d6a164ebcee515ead5d8704ab04430b65f29cec21c10cecf2b99079737\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc
/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e4845e8b99f2def354e4a055d407495a578db4994aef6fb1c70b91760bbf4e2a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e4845e8b99f2def354e4a055d407495a578db4994aef6fb1c70b91760bbf4e2a\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"message\\\":\\\"le observer\\\\nW1122 10:40:01.221686 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1122 10:40:01.221835 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1122 10:40:01.223195 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3742639324/tls.crt::/tmp/serving-cert-3742639324/tls.key\\\\\\\"\\\\nI1122 10:40:01.845350 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1122 10:40:01.848213 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1122 10:40:01.848231 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1122 10:40:01.848251 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1122 10:40:01.848258 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1122 10:40:01.855383 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1122 10:40:01.855412 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1122 10:40:01.855418 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1122 10:40:01.855423 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nI1122 10:40:01.855441 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1122 10:40:01.855456 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1122 10:40:01.855461 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1122 10:40:01.855464 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1122 10:40:01.857125 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T10:39:55Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://59001d5969924d417da69f7b385e8ee904bb5289914c6f4a58156a80f2553abd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:43Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e4fdad471fe7d7332fa8c1fc1e4779b934b140f9c808e0af44f05808c56b3e42\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e4fdad471fe7d7332fa8c1fc1e4779b934b140f9c808e0af44f05808c56b3e42\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:39:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:39:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:39:40Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:10Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:10 crc kubenswrapper[4926]: I1122 10:40:10.637923 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:10Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:10 crc kubenswrapper[4926]: I1122 10:40:10.653811 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d4977b14-85c3-4141-9b15-1768f09e8d27\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7f4461b3d09c647d71476b2c089ab47e93fe0e0efb27f8179e476ff3667447f6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5nwn7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fb54559825bae40317f4dbc23c5b4238c4681f9845053656d59b2d2b3e504e1a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5nwn7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:00Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-xr9nd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:10Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:10 crc kubenswrapper[4926]: I1122 10:40:10.686449 4926 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1f3bdd4c-e5c5-46f1-8f92-450d892ef2e2\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b68f6c08d1ac65e51054d798dcb1e6affbf5f114ea3848b544d3093168e44150\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://adceb29cfdba4f70773eb38f81b2291080a8a25e6d6ffe7fbbb037118e0ac29f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://29acb771444281287208f7c5aecfe78ed6e70fe2940be31d423c011878bee6c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-di
r\\\"}]},{\\\"containerID\\\":\\\"cri-o://c37c77ac89f0469ff68b3d5dffc2af7d7b6997b8975abe244e302baf0f5bbacc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ed65ad0463b5aa72d13756891af20691bff705a94c67a55d45f71af2a88b01e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2f4c96d7281eda14ad3971ee27f9f0f401d12ba8b72e32b65bd9d4935182f980\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2f4c96d7281eda14ad3971ee27f9f0f401d12ba8b72e32b65bd9d4935182f980\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:39:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:39:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ebad6d17a869f5f58a7c43300ceace96025b58ca9621cb6bc23b45d69fe471b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ebad6d17a869f5f58a7c43300ceace96025b58ca9621cb6bc23b45d69fe471b4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:39:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:39:42Z\\\"}}},{\\\"containerID\\\"
:\\\"cri-o://6075bb148352742b787b1e8df58fe76d06db89303a7c297d07446148532e92cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6075bb148352742b787b1e8df58fe76d06db89303a7c297d07446148532e92cb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:39:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:39:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:39:40Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:10Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:10 crc kubenswrapper[4926]: I1122 10:40:10.701093 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://20ee112074738980326d231b1680aa74c920ec797dbfe4752ebfdfb99695caeb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0a5e6c7d3f4dd4bbfdc58c50672c5187d32b136fb9e096c95e786193e1c773b9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:10Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:10 crc kubenswrapper[4926]: I1122 10:40:10.709727 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:10 crc kubenswrapper[4926]: I1122 10:40:10.709768 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:10 crc kubenswrapper[4926]: I1122 10:40:10.709782 4926 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Nov 22 10:40:10 crc kubenswrapper[4926]: I1122 10:40:10.709802 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:10 crc kubenswrapper[4926]: I1122 10:40:10.709813 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:10Z","lastTransitionTime":"2025-11-22T10:40:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:10 crc kubenswrapper[4926]: I1122 10:40:10.723854 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-z69nr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"25bc94bb-a5d1-431c-9847-2f6a02997e25\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://20393151c13ddc03e23c32092c70e09c99527d0dd7554c02fc9d2ef8f6e38ba4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://877601ea3a274df873ed3165ae9f9e407093527a55ca10bb9a1c6092312c1b58\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ecaafcef7f92f85be04341712725da108f6dc00c79d4fb72f683186ccaa7c7d7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f2e1c170bdf63c83df5049fbe46ef4de6e08838e961ed3c16904cb9572fdf52f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4dd9889bb8600e958757f70ba92e2e7742f9f4e8ce9bf9c11dc4a2fd4ba0aaa2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f647a7091a4d0acbf116e2583851b13ec4415004447202e892f2b24b7acf5e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ffc0bf6be79f2bd37f0c4d453c60991c96473d76
5cdb8012051321114120f194\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://19bceeb84e2cce918b75507fb0f9d5e4365fcd0d30da4491b83ed54b3c2d369a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccoun
t\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fde011e559f073f261a07a3e55c05d88d1b11a16b6858b74cb8ef79b97c266b1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fde011e559f073f261a07a3e55c05d88d1b11a16b6858b74cb8ef79b97c266b1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:00Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-z69nr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:10Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:10 crc kubenswrapper[4926]: I1122 10:40:10.738523 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://136423f9eeb872be0b685dcfee2a81e5f4133ad09607dc53fdfa64d70e87ceab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:10Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:10 crc kubenswrapper[4926]: I1122 10:40:10.752437 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-c6w2q" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"36de2843-6491-4c54-b624-c4a3d328c164\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://13dff3bd18b5adc45fb92f8207a6726011752f430271dabbae9f2e94011f1b08\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-52vzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:00Z\\\"}}\" for pod \"openshift-multus\"/\"multus-c6w2q\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:10Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:10 crc kubenswrapper[4926]: I1122 10:40:10.765302 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:10Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:10 crc kubenswrapper[4926]: I1122 10:40:10.776049 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-4sppr" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f66b576d-83a6-4919-a918-7b075f35881e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a4a164ad2484a9c0b09c8e6b174448dedfe84a017ada69b19429d90621540080\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jwtrf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:01Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-4sppr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:10Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:10 crc kubenswrapper[4926]: I1122 10:40:10.788193 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"19e2ae79-41cf-4653-aa92-cc410145852b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5b4679473c2c11fecbd8496e1cbeb15b3a2e3fff5e5b5f127e0d1f1d7b3e5b70\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8066b411b165bb6f3aa9fac7c4f7d5687e58fba56ff2afd14c7a351ba3877483\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://96a716433f83f8041fc5f29383937a1b49d73701286739632a5c07a7e4a42077\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ff433d4efb9f77a47f79a39aa609b09a40a642f554e19b8fce0c84ad96a5e34\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:39:40Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:10Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:10 crc kubenswrapper[4926]: I1122 10:40:10.800313 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-wqf9b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"385427b5-fb45-42dd-8ff8-a4c7cdad6157\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://06207f52a9095cf1830a6ad91952c7efa774aa15a2fca873dc09b6b995e26401\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4rrk8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\"
:[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:00Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-wqf9b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:10Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:10 crc kubenswrapper[4926]: I1122 10:40:10.811757 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:10 crc kubenswrapper[4926]: I1122 10:40:10.811822 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:10 crc kubenswrapper[4926]: I1122 10:40:10.811833 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:10 crc kubenswrapper[4926]: I1122 10:40:10.811853 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:10 crc kubenswrapper[4926]: I1122 10:40:10.811866 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:10Z","lastTransitionTime":"2025-11-22T10:40:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:10 crc kubenswrapper[4926]: I1122 10:40:10.818456 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2bff8ee1644ff7cd9bee279e5c641b5d9c55e0546fbc7d1f2330e4e86aee0c37\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:10Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:10 crc kubenswrapper[4926]: I1122 10:40:10.832944 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was 
deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:10Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:10 crc kubenswrapper[4926]: I1122 10:40:10.835966 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-z69nr_25bc94bb-a5d1-431c-9847-2f6a02997e25/ovnkube-controller/0.log" Nov 22 10:40:10 crc kubenswrapper[4926]: I1122 10:40:10.841376 4926 generic.go:334] "Generic (PLEG): container finished" podID="25bc94bb-a5d1-431c-9847-2f6a02997e25" containerID="ffc0bf6be79f2bd37f0c4d453c60991c96473d765cdb8012051321114120f194" exitCode=1 Nov 22 10:40:10 crc kubenswrapper[4926]: I1122 10:40:10.841413 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-z69nr" event={"ID":"25bc94bb-a5d1-431c-9847-2f6a02997e25","Type":"ContainerDied","Data":"ffc0bf6be79f2bd37f0c4d453c60991c96473d765cdb8012051321114120f194"} Nov 22 10:40:10 crc kubenswrapper[4926]: I1122 10:40:10.842374 4926 scope.go:117] "RemoveContainer" containerID="ffc0bf6be79f2bd37f0c4d453c60991c96473d765cdb8012051321114120f194" Nov 22 10:40:10 crc kubenswrapper[4926]: I1122 10:40:10.863488 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-z69nr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"25bc94bb-a5d1-431c-9847-2f6a02997e25\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://20393151c13ddc03e23c32092c70e09c99527d0dd7554c02fc9d2ef8f6e38ba4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://877601ea3a274df873ed3165ae9f9e407093527a55ca10bb9a1c6092312c1b58\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ecaafcef7f92f85be04341712725da108f6dc00c79d4fb72f683186ccaa7c7d7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f2e1c170bdf63c83df5049fbe46ef4de6e08838e961ed3c16904cb9572fdf52f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4dd9889bb8600e958757f70ba92e2e7742f9f4e8ce9bf9c11dc4a2fd4ba0aaa2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f647a7091a4d0acbf116e2583851b13ec4415004447202e892f2b24b7acf5e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ffc0bf6be79f2bd37f0c4d453c60991c96473d76
5cdb8012051321114120f194\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ffc0bf6be79f2bd37f0c4d453c60991c96473d765cdb8012051321114120f194\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-22T10:40:10Z\\\",\\\"message\\\":\\\"oval\\\\nI1122 10:40:10.313331 6165 handler.go:190] Sending *v1.Pod event handler 3 for removal\\\\nI1122 10:40:10.313336 6165 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI1122 10:40:10.313363 6165 handler.go:208] Removed *v1.Pod event handler 6\\\\nI1122 10:40:10.313531 6165 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI1122 10:40:10.313538 6165 handler.go:208] Removed *v1.Pod event handler 3\\\\nI1122 10:40:10.313534 6165 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1122 10:40:10.313555 6165 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1122 10:40:10.313595 6165 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1122 10:40:10.313647 6165 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1122 10:40:10.313690 6165 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1122 10:40:10.313722 6165 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1122 10:40:10.313737 6165 handler.go:208] Removed *v1.Node event handler 2\\\\nI1122 10:40:10.313796 6165 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1122 10:40:10.313849 6165 factory.go:656] Stopping watch factory\\\\nI1122 10:40:10.313891 6165 ovnkube.go:599] Stopped ovnkube\\\\nI1122 10:40:10.313806 6165 handler.go:208] Removed *v1.Node event handler 7\\\\nI1122 
10:40:1\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://19bceeb84e2cce918b75507fb0f9d5e4365fcd0d30da4491b83ed54b3c2d369a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fde011e559f073f261a07a3e55c05d88d1b11a16b6858b74cb8ef79b97c266b1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0
d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fde011e559f073f261a07a3e55c05d88d1b11a16b6858b74cb8ef79b97c266b1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:00Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-z69nr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:10Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:10 crc kubenswrapper[4926]: I1122 10:40:10.896431 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1f3bdd4c-e5c5-46f1-8f92-450d892ef2e2\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b68f6c08d1ac65e51054d798dcb1e6affbf5f114ea3848b544d3093168e44150\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://adceb29cfdba4f70773eb38f81b2291080a8a25e6d6ffe7fbbb037118e0ac29f\\
\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://29acb771444281287208f7c5aecfe78ed6e70fe2940be31d423c011878bee6c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c37c77ac89f0469ff68b3d5dffc2af7d7b6997b8975abe244e302baf0f5bbacc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ed65ad0463b5aa72d13756891af20691bff705a94c67a55d45f71af2a88b01e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2f4c96d7281eda14ad3971ee27f9f0f401d12ba8b72e32b65bd9d4935182f980\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art
-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2f4c96d7281eda14ad3971ee27f9f0f401d12ba8b72e32b65bd9d4935182f980\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:39:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:39:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ebad6d17a869f5f58a7c43300ceace96025b58ca9621cb6bc23b45d69fe471b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ebad6d17a869f5f58a7c43300ceace96025b58ca9621cb6bc23b45d69fe471b4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:39:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:39:42Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://6075bb148352742b787b1e8df58fe76d06db89303a7c297d07446148532e92cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6075bb148352742b787b1e8df58fe76d06db89303a7c297d07446148532e92cb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:39:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:39:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:39:40Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:10Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:10 crc kubenswrapper[4926]: I1122 10:40:10.909009 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://20ee112074738980326d231b1680aa74c920ec797dbfe4752ebfdfb99695caeb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0a5e6c7d3f4dd4bbfdc58c50672c5187d32b136fb9e096c95e786193e1c773b9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:10Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:10 crc kubenswrapper[4926]: I1122 10:40:10.915424 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:10 crc kubenswrapper[4926]: I1122 10:40:10.915481 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:10 crc kubenswrapper[4926]: I1122 10:40:10.915493 4926 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Nov 22 10:40:10 crc kubenswrapper[4926]: I1122 10:40:10.915512 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:10 crc kubenswrapper[4926]: I1122 10:40:10.915526 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:10Z","lastTransitionTime":"2025-11-22T10:40:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:10 crc kubenswrapper[4926]: I1122 10:40:10.921286 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://136423f9eeb872be0b685dcfee2a81e5f4133ad09607dc53fdfa64d70e87ceab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:10Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:10 crc kubenswrapper[4926]: I1122 10:40:10.936064 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-c6w2q" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"36de2843-6491-4c54-b624-c4a3d328c164\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://13dff3bd18b5adc45fb92f8207a6726011752f430271dabbae9f2e94011f1b08\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-52vzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:00Z\\\"}}\" for pod \"openshift-multus\"/\"multus-c6w2q\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:10Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:10 crc kubenswrapper[4926]: I1122 10:40:10.949954 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:10Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:10 crc kubenswrapper[4926]: I1122 10:40:10.960867 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:10Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:10 crc kubenswrapper[4926]: I1122 10:40:10.972436 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-4sppr" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f66b576d-83a6-4919-a918-7b075f35881e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a4a164ad2484a9c0b09c8e6b174448dedfe84a017ada69b19429d90621540080\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jwtrf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:01Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-4sppr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:10Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:10 crc kubenswrapper[4926]: I1122 10:40:10.983112 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"19e2ae79-41cf-4653-aa92-cc410145852b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5b4679473c2c11fecbd8496e1cbeb15b3a2e3fff5e5b5f127e0d1f1d7b3e5b70\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8066b411b165bb6f3aa9fac7c4f7d5687e58fba56ff2afd14c7a351ba3877483\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://96a716433f83f8041fc5f29383937a1b49d73701286739632a5c07a7e4a42077\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ff433d4efb9f77a47f79a39aa609b09a40a642f554e19b8fce0c84ad96a5e34\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:39:40Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:10Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:10 crc kubenswrapper[4926]: I1122 10:40:10.996521 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-wqf9b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"385427b5-fb45-42dd-8ff8-a4c7cdad6157\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://06207f52a9095cf1830a6ad91952c7efa774aa15a2fca873dc09b6b995e26401\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4rrk8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\"
:[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:00Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-wqf9b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:10Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:11 crc kubenswrapper[4926]: I1122 10:40:11.010182 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2bff8ee1644ff7cd9bee279e5c641b5d9c55e0546fbc7d1f2330e4e86aee0c37\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:11Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:11 crc kubenswrapper[4926]: I1122 10:40:11.020302 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:11 crc kubenswrapper[4926]: I1122 10:40:11.020332 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:11 crc kubenswrapper[4926]: I1122 10:40:11.020341 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:11 crc kubenswrapper[4926]: I1122 10:40:11.020356 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:11 crc kubenswrapper[4926]: I1122 10:40:11.020365 4926 setters.go:603] 
"Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:11Z","lastTransitionTime":"2025-11-22T10:40:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:11 crc kubenswrapper[4926]: I1122 10:40:11.023433 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d4977b14-85c3-4141-9b15-1768f09e8d27\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7f4461b3d09c647d71476b2c089ab47e93fe0e0efb27f8179e476ff3667447f6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5nwn7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fb54559825bae40317f4dbc23c5b4238c4681f9845053656d59b2d2b3e504e1a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5nwn7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",
\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:00Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-xr9nd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:11Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:11 crc kubenswrapper[4926]: I1122 10:40:11.037056 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-sr572" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fa95257d-7464-4038-b2f3-aa795e4ac425\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b2dd6b2a2fe640f28f973df40cf7a31e3cba962a0d2aec796c6fcfa3892adce1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0a7d49a66e96b6f535cae7cd657acf64164c67b52df055b133183ef25644863d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0a7d49a66e96b6f535cae7cd657acf64164c67b52df055b133183ef25644863d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\
\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://53833aaa04a836cea1e6539fd089b91d92bec1e9c1037d036a09fa83a95b45da\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://53833aaa04a836cea1e6539fd089b91d92bec1e9c1037d036a09fa83a95b45da\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://80eeb7fb332eda9b9472092de548160297c4abdf76a929904fcf3321e992c230\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://80eeb7fb332eda9b9472092de548160297c4abdf76a929904fcf3321e992c230\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:40:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6293e4f193241ddbe2b4e93ceee670330e9e6cf86889fc9b049a7195da496a28\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\
\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6293e4f193241ddbe2b4e93ceee670330e9e6cf86889fc9b049a7195da496a28\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:40:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6ffd06bbdeee7c23dde48a03a975b5a86b258d8f95281abd78c8ce459155bb3f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6ffd06bbdeee7c23dde48a03a975b5a86b258d8f95281abd78c8ce459155bb3f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:40:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ff5dbca8ebeaee6449c5eadf548e32151cc59875e405a574c1f2e0677b40bb19\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ff5dbca8ebeaee6449c5eadf548e32151cc59875e405a574c1f2e0677b40bb19\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:40:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:00Z\\\"}}\" for pod 
\"openshift-multus\"/\"multus-additional-cni-plugins-sr572\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:11Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:11 crc kubenswrapper[4926]: I1122 10:40:11.048893 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8bc26a84-5939-4708-97f7-36fb5b1d1f27\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:40Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:40Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bdc617bc3d316106b3e66df8521ae9165a90df081b92c4a6c167895006a667a4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8bdb9d01c359b36d5ea1517197ab1db0746267b371b5aaf6ae9af7157af6f76\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f92968d6a164ebcee515ead5d8704ab04430b65f29cec21c10cecf2b99079737\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-o
perator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e4845e8b99f2def354e4a055d407495a578db4994aef6fb1c70b91760bbf4e2a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e4845e8b99f2def354e4a055d407495a578db4994aef6fb1c70b91760bbf4e2a\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"message\\\":\\\"le observer\\\\nW1122 10:40:01.221686 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1122 10:40:01.221835 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1122 10:40:01.223195 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3742639324/tls.crt::/tmp/serving-cert-3742639324/tls.key\\\\\\\"\\\\nI1122 10:40:01.845350 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1122 10:40:01.848213 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1122 10:40:01.848231 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1122 10:40:01.848251 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1122 10:40:01.848258 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1122 10:40:01.855383 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1122 10:40:01.855412 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1122 10:40:01.855418 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1122 10:40:01.855423 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nI1122 10:40:01.855441 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1122 10:40:01.855456 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1122 10:40:01.855461 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1122 10:40:01.855464 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1122 10:40:01.857125 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T10:39:55Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://59001d5969924d417da69f7b385e8ee904bb5289914c6f4a58156a80f2553abd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:43Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e4fdad471fe7d7332fa8c1fc1e4779b934b140f9c808e0af44f05808c56b3e42\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e4fdad471fe7d7332fa8c1fc1e4779b934b140f9c808e0af44f05808c56b3e42\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:39:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:39:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:39:40Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:11Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:11 crc kubenswrapper[4926]: I1122 10:40:11.064805 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:11Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:11 crc kubenswrapper[4926]: I1122 10:40:11.122445 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:11 crc kubenswrapper[4926]: I1122 10:40:11.122481 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:11 crc kubenswrapper[4926]: I1122 10:40:11.122493 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:11 crc kubenswrapper[4926]: I1122 10:40:11.122508 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:11 crc kubenswrapper[4926]: I1122 10:40:11.122521 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:11Z","lastTransitionTime":"2025-11-22T10:40:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:40:11 crc kubenswrapper[4926]: I1122 10:40:11.224844 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:11 crc kubenswrapper[4926]: I1122 10:40:11.225172 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:11 crc kubenswrapper[4926]: I1122 10:40:11.225305 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:11 crc kubenswrapper[4926]: I1122 10:40:11.225468 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:11 crc kubenswrapper[4926]: I1122 10:40:11.225595 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:11Z","lastTransitionTime":"2025-11-22T10:40:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:11 crc kubenswrapper[4926]: I1122 10:40:11.328485 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:11 crc kubenswrapper[4926]: I1122 10:40:11.328558 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:11 crc kubenswrapper[4926]: I1122 10:40:11.328582 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:11 crc kubenswrapper[4926]: I1122 10:40:11.328611 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:11 crc kubenswrapper[4926]: I1122 10:40:11.328634 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:11Z","lastTransitionTime":"2025-11-22T10:40:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:11 crc kubenswrapper[4926]: I1122 10:40:11.432042 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:11 crc kubenswrapper[4926]: I1122 10:40:11.432097 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:11 crc kubenswrapper[4926]: I1122 10:40:11.432111 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:11 crc kubenswrapper[4926]: I1122 10:40:11.432130 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:11 crc kubenswrapper[4926]: I1122 10:40:11.432146 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:11Z","lastTransitionTime":"2025-11-22T10:40:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:40:11 crc kubenswrapper[4926]: I1122 10:40:11.534390 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:11 crc kubenswrapper[4926]: I1122 10:40:11.534437 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:11 crc kubenswrapper[4926]: I1122 10:40:11.534448 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:11 crc kubenswrapper[4926]: I1122 10:40:11.534462 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:11 crc kubenswrapper[4926]: I1122 10:40:11.534472 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:11Z","lastTransitionTime":"2025-11-22T10:40:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:11 crc kubenswrapper[4926]: I1122 10:40:11.581256 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 22 10:40:11 crc kubenswrapper[4926]: E1122 10:40:11.581388 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 22 10:40:11 crc kubenswrapper[4926]: I1122 10:40:11.636467 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:11 crc kubenswrapper[4926]: I1122 10:40:11.636518 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:11 crc kubenswrapper[4926]: I1122 10:40:11.636530 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:11 crc kubenswrapper[4926]: I1122 10:40:11.636550 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:11 crc kubenswrapper[4926]: I1122 10:40:11.636562 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:11Z","lastTransitionTime":"2025-11-22T10:40:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:40:11 crc kubenswrapper[4926]: I1122 10:40:11.739691 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:11 crc kubenswrapper[4926]: I1122 10:40:11.740133 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:11 crc kubenswrapper[4926]: I1122 10:40:11.740208 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:11 crc kubenswrapper[4926]: I1122 10:40:11.740344 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:11 crc kubenswrapper[4926]: I1122 10:40:11.740417 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:11Z","lastTransitionTime":"2025-11-22T10:40:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:11 crc kubenswrapper[4926]: I1122 10:40:11.843915 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:11 crc kubenswrapper[4926]: I1122 10:40:11.843972 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:11 crc kubenswrapper[4926]: I1122 10:40:11.843981 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:11 crc kubenswrapper[4926]: I1122 10:40:11.843996 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:11 crc kubenswrapper[4926]: I1122 10:40:11.844005 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:11Z","lastTransitionTime":"2025-11-22T10:40:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:40:11 crc kubenswrapper[4926]: I1122 10:40:11.847586 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-z69nr_25bc94bb-a5d1-431c-9847-2f6a02997e25/ovnkube-controller/0.log" Nov 22 10:40:11 crc kubenswrapper[4926]: I1122 10:40:11.849945 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-z69nr" event={"ID":"25bc94bb-a5d1-431c-9847-2f6a02997e25","Type":"ContainerStarted","Data":"c31c877de789dac072b3eff81eb5639254198dc2af1cc12ecc408c3272f8c2c5"} Nov 22 10:40:11 crc kubenswrapper[4926]: I1122 10:40:11.850338 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-z69nr" Nov 22 10:40:11 crc kubenswrapper[4926]: I1122 10:40:11.864623 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"19e2ae79-41cf-4653-aa92-cc410145852b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5b4679473c2c11fecbd8496e1cbeb15b3a2e3fff5e5b5f127e0d1f1d7b3e5b70\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8066b411b165bb6f3aa9fac7c4f7d5687e58fba56ff2afd14c7a351ba3877483\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://96a716433f83f8041fc5
f29383937a1b49d73701286739632a5c07a7e4a42077\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ff433d4efb9f77a47f79a39aa609b09a40a642f554e19b8fce0c84ad96a5e34\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:39:40Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:11Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:11 crc kubenswrapper[4926]: I1122 10:40:11.873538 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-wqf9b" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"385427b5-fb45-42dd-8ff8-a4c7cdad6157\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://06207f52a9095cf1830a6ad91952c7efa774aa15a2fca873dc09b6b995e26401\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4rrk8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:00Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-wqf9b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:11Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:11 crc kubenswrapper[4926]: I1122 10:40:11.891531 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2bff8ee1644ff7cd9bee279e5c641b5d9c55e0546fbc7d1f2330e4e86aee0c37\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:11Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:11 crc kubenswrapper[4926]: I1122 10:40:11.903062 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was 
deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:11Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:11 crc kubenswrapper[4926]: I1122 10:40:11.916972 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:11Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:11 crc kubenswrapper[4926]: I1122 10:40:11.930638 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-4sppr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f66b576d-83a6-4919-a918-7b075f35881e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a4a164ad2484a9c0b09c8e6b174448dedfe84a017ada69b19429d90621540080\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jwtrf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:01Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-4sppr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify 
certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:11Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:11 crc kubenswrapper[4926]: I1122 10:40:11.944163 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8bc26a84-5939-4708-97f7-36fb5b1d1f27\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:40Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:40Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bdc617bc3d316106b3e66df8521ae9165a90df081b92c4a6c167895006a667a4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8bdb9d01c359b36d5ea1517197ab1db0746267b371b5aaf6ae9af7157af6f76\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f92968d6a164ebcee515ead5d8704ab04430b65f29cec21c10cecf2b99079737\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"last
State\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e4845e8b99f2def354e4a055d407495a578db4994aef6fb1c70b91760bbf4e2a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e4845e8b99f2def354e4a055d407495a578db4994aef6fb1c70b91760bbf4e2a\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"message\\\":\\\"le observer\\\\nW1122 10:40:01.221686 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1122 10:40:01.221835 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1122 10:40:01.223195 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3742639324/tls.crt::/tmp/serving-cert-3742639324/tls.key\\\\\\\"\\\\nI1122 10:40:01.845350 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1122 10:40:01.848213 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1122 10:40:01.848231 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1122 10:40:01.848251 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1122 10:40:01.848258 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1122 10:40:01.855383 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1122 10:40:01.855412 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1122 10:40:01.855418 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1122 10:40:01.855423 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nI1122 10:40:01.855441 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1122 10:40:01.855456 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1122 10:40:01.855461 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1122 10:40:01.855464 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1122 10:40:01.857125 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T10:39:55Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://59001d5969924d417da69f7b385e8ee904bb5289914c6f4a58156a80f2553abd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:43Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e4fdad471fe7d7332fa8c1fc1e4779b934b140f9c808e0af44f05808c56b3e42\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e4fdad471fe7d7332fa8c1fc1e4779b934b140f9c808e0af44f05808c56b3e42\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:39:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:39:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:39:40Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:11Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:11 crc kubenswrapper[4926]: I1122 10:40:11.946251 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:11 crc kubenswrapper[4926]: I1122 10:40:11.946287 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:11 crc kubenswrapper[4926]: I1122 10:40:11.946299 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:11 crc kubenswrapper[4926]: I1122 10:40:11.946319 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:11 crc kubenswrapper[4926]: I1122 10:40:11.946331 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:11Z","lastTransitionTime":"2025-11-22T10:40:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:11 crc kubenswrapper[4926]: I1122 10:40:11.956511 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:11Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:11 crc kubenswrapper[4926]: I1122 10:40:11.969104 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d4977b14-85c3-4141-9b15-1768f09e8d27\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7f4461b3d09c647d71476b2c089ab47e93fe0e0efb27f8179e476ff3667447f6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5nwn7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fb54559825bae40317f4dbc23c5b4238c4681f9845053656d59b2d2b3e504e1a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5nwn7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:00Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-xr9nd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:11Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:11 crc kubenswrapper[4926]: I1122 10:40:11.981609 4926 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-additional-cni-plugins-sr572" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fa95257d-7464-4038-b2f3-aa795e4ac425\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b2dd6b2a2fe640f28f973df40cf7a31e3cba962a0d2aec796c6fcfa3892adce1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0a7d49a66e96b6f535cae7cd657acf64164c67b52df055b133183ef25644863d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0a7d49a66e96b6f535cae7cd657acf64164c67b52df055b133183ef25644863d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://53833aaa04a836cea1e6539fd089b91d92bec1e9c1037d036a09fa83a95b45da\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2c
c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://53833aaa04a836cea1e6539fd089b91d92bec1e9c1037d036a09fa83a95b45da\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://80eeb7fb332eda9b9472092de548160297c4abdf76a929904fcf3321e992c230\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://80eeb7fb332eda9b9472092de548160297c4abdf76a929904fcf3321e992c230\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:40:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6293e4f193241ddbe2b4e93ceee670330e9e6cf86889fc9b049a7195da496a28\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6293e4f193241ddbe2b4e93ceee670330e9e6cf86889fc9b049a7195da496a28\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:40:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-re
lease\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6ffd06bbdeee7c23dde48a03a975b5a86b258d8f95281abd78c8ce459155bb3f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6ffd06bbdeee7c23dde48a03a975b5a86b258d8f95281abd78c8ce459155bb3f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:40:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ff5dbca8ebeaee6449c5eadf548e32151cc59875e405a574c1f2e0677b40bb19\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ff5dbca8ebeaee6449c5eadf548e32151cc59875e405a574c1f2e0677b40bb19\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:40:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:00Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-sr572\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:11Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:12 crc kubenswrapper[4926]: I1122 10:40:12.005680 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1f3bdd4c-e5c5-46f1-8f92-450d892ef2e2\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b68f6c08d1ac65e51054d798dcb1e6affbf5f114ea3848b544d3093168e44150\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://adceb29cfdba4f70773eb38f81b2291080a8a25e6d6ffe7fbbb037118e0ac29f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://29acb771444281287208f7c5aecfe78ed6e70fe2940be31d423c011878bee6c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c37c77ac89f0469ff68b3d5dffc2af7d7b6997b
8975abe244e302baf0f5bbacc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ed65ad0463b5aa72d13756891af20691bff705a94c67a55d45f71af2a88b01e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2f4c96d7281eda14ad3971ee27f9f0f401d12ba8b72e32b65bd9d4935182f980\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2f4c96d7281eda14ad3971ee27f9f0f401d12ba8b72e32b65bd9d4935182f980\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:39:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:39:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ebad6d17a869f5f58a7c43300ceace96025b58ca9621cb6bc23b45d69fe471b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ebad6d17a869f5f58a7c43300ceace96025b58ca9621cb6bc23b45d69fe471b4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:39:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:39:42Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://6075bb148352742b787b1e8df58fe76d06db89303a7c297d07446148532e92cb\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6075bb148352742b787b1e8df58fe76d06db89303a7c297d07446148532e92cb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:39:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:39:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:39:40Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:12Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:12 crc kubenswrapper[4926]: I1122 10:40:12.024116 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://20ee112074738980326d231b1680aa74c920ec797dbfe4752ebfdfb99695caeb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0a5e6c7d3f4dd4bbfdc58c50672c5187d32b136fb9e096c95e786193e1c773b9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36
cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:12Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:12 crc kubenswrapper[4926]: I1122 10:40:12.048983 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:12 crc kubenswrapper[4926]: I1122 10:40:12.049219 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:12 crc kubenswrapper[4926]: I1122 10:40:12.049293 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:12 crc kubenswrapper[4926]: I1122 10:40:12.049361 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:12 crc kubenswrapper[4926]: I1122 10:40:12.049439 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:12Z","lastTransitionTime":"2025-11-22T10:40:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:40:12 crc kubenswrapper[4926]: I1122 10:40:12.054057 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-z69nr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"25bc94bb-a5d1-431c-9847-2f6a02997e25\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://20393151c13ddc03e23c32092c70e09c99527d0dd7554c02fc9d2ef8f6e38ba4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://877601ea3a274df873ed3165ae9f9e407093527a55ca10bb9a1c6092312c1b58\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\
":\\\"cri-o://ecaafcef7f92f85be04341712725da108f6dc00c79d4fb72f683186ccaa7c7d7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f2e1c170bdf63c83df5049fbe46ef4de6e08838e961ed3c16904cb9572fdf52f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4dd9889bb8600e958757f70ba92e2e7742f9f4e8ce9bf9c11dc4a2fd4ba0aaa2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f647a7091a4d0acbf116e2583851b13ec4415004447202e892f2b24b7acf5e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.i
o/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c31c877de789dac072b3eff81eb5639254198dc2af1cc12ecc408c3272f8c2c5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ffc0bf6be79f2bd37f0c4d453c60991c96473d765cdb8012051321114120f194\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-22T10:40:10Z\\\",\\\"message\\\":\\\"oval\\\\nI1122 10:40:10.313331 6165 handler.go:190] Sending *v1.Pod event handler 3 for removal\\\\nI1122 10:40:10.313336 6165 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI1122 10:40:10.313363 6165 handler.go:208] Removed *v1.Pod event handler 6\\\\nI1122 10:40:10.313531 6165 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI1122 10:40:10.313538 6165 handler.go:208] Removed *v1.Pod event handler 3\\\\nI1122 10:40:10.313534 6165 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1122 10:40:10.313555 6165 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1122 10:40:10.313595 6165 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1122 10:40:10.313647 6165 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1122 10:40:10.313690 6165 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1122 10:40:10.313722 6165 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1122 10:40:10.313737 6165 handler.go:208] Removed *v1.Node event handler 2\\\\nI1122 10:40:10.313796 6165 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1122 10:40:10.313849 6165 factory.go:656] Stopping watch factory\\\\nI1122 10:40:10.313891 6165 ovnkube.go:599] Stopped ovnkube\\\\nI1122 10:40:10.313806 6165 handler.go:208] Removed *v1.Node event handler 7\\\\nI1122 
10:40:1\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:06Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://19bceeb84e2cce918b75507fb0f9d5e4365fcd0d30da4491b83ed54b3c2d369a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\
\"containerID\\\":\\\"cri-o://fde011e559f073f261a07a3e55c05d88d1b11a16b6858b74cb8ef79b97c266b1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fde011e559f073f261a07a3e55c05d88d1b11a16b6858b74cb8ef79b97c266b1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:00Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-z69nr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:12Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:12 crc kubenswrapper[4926]: I1122 10:40:12.067677 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://136423f9eeb872be0b685dcfee2a81e5f4133ad09607dc53fdfa64d70e87ceab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod 
\"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:12Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:12 crc kubenswrapper[4926]: I1122 10:40:12.083325 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-c6w2q" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"36de2843-6491-4c54-b624-c4a3d328c164\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://13dff3bd18b5adc45fb92f8207a6726011752f430271dabbae9f2e94011f1b08\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"
},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-52vzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:00Z\\\"}}\" for pod \"openshift-multus\"/\"multus-c6w2q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:12Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:12 crc kubenswrapper[4926]: I1122 10:40:12.152385 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:12 crc kubenswrapper[4926]: I1122 10:40:12.152438 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:12 crc kubenswrapper[4926]: I1122 10:40:12.152454 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:12 crc kubenswrapper[4926]: I1122 10:40:12.152475 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:12 crc kubenswrapper[4926]: I1122 10:40:12.152506 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:12Z","lastTransitionTime":"2025-11-22T10:40:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:12 crc kubenswrapper[4926]: I1122 10:40:12.254546 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:12 crc kubenswrapper[4926]: I1122 10:40:12.254631 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:12 crc kubenswrapper[4926]: I1122 10:40:12.254650 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:12 crc kubenswrapper[4926]: I1122 10:40:12.254682 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:12 crc kubenswrapper[4926]: I1122 10:40:12.254706 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:12Z","lastTransitionTime":"2025-11-22T10:40:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:40:12 crc kubenswrapper[4926]: I1122 10:40:12.358375 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:12 crc kubenswrapper[4926]: I1122 10:40:12.358469 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:12 crc kubenswrapper[4926]: I1122 10:40:12.358492 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:12 crc kubenswrapper[4926]: I1122 10:40:12.358521 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:12 crc kubenswrapper[4926]: I1122 10:40:12.358543 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:12Z","lastTransitionTime":"2025-11-22T10:40:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:12 crc kubenswrapper[4926]: I1122 10:40:12.461433 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:12 crc kubenswrapper[4926]: I1122 10:40:12.461490 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:12 crc kubenswrapper[4926]: I1122 10:40:12.461500 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:12 crc kubenswrapper[4926]: I1122 10:40:12.461516 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:12 crc kubenswrapper[4926]: I1122 10:40:12.461530 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:12Z","lastTransitionTime":"2025-11-22T10:40:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:12 crc kubenswrapper[4926]: I1122 10:40:12.564635 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:12 crc kubenswrapper[4926]: I1122 10:40:12.564697 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:12 crc kubenswrapper[4926]: I1122 10:40:12.564710 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:12 crc kubenswrapper[4926]: I1122 10:40:12.564728 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:12 crc kubenswrapper[4926]: I1122 10:40:12.564740 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:12Z","lastTransitionTime":"2025-11-22T10:40:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:40:12 crc kubenswrapper[4926]: I1122 10:40:12.581508 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 10:40:12 crc kubenswrapper[4926]: I1122 10:40:12.581605 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 22 10:40:12 crc kubenswrapper[4926]: E1122 10:40:12.581757 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 22 10:40:12 crc kubenswrapper[4926]: E1122 10:40:12.581922 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 22 10:40:12 crc kubenswrapper[4926]: I1122 10:40:12.668049 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:12 crc kubenswrapper[4926]: I1122 10:40:12.668094 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:12 crc kubenswrapper[4926]: I1122 10:40:12.668106 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:12 crc kubenswrapper[4926]: I1122 10:40:12.668123 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:12 crc kubenswrapper[4926]: I1122 10:40:12.668137 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:12Z","lastTransitionTime":"2025-11-22T10:40:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:40:12 crc kubenswrapper[4926]: I1122 10:40:12.770530 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:12 crc kubenswrapper[4926]: I1122 10:40:12.770627 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:12 crc kubenswrapper[4926]: I1122 10:40:12.770639 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:12 crc kubenswrapper[4926]: I1122 10:40:12.770662 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:12 crc kubenswrapper[4926]: I1122 10:40:12.770675 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:12Z","lastTransitionTime":"2025-11-22T10:40:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:12 crc kubenswrapper[4926]: I1122 10:40:12.856126 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-z69nr_25bc94bb-a5d1-431c-9847-2f6a02997e25/ovnkube-controller/1.log" Nov 22 10:40:12 crc kubenswrapper[4926]: I1122 10:40:12.856945 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-z69nr_25bc94bb-a5d1-431c-9847-2f6a02997e25/ovnkube-controller/0.log" Nov 22 10:40:12 crc kubenswrapper[4926]: I1122 10:40:12.860449 4926 generic.go:334] "Generic (PLEG): container finished" podID="25bc94bb-a5d1-431c-9847-2f6a02997e25" containerID="c31c877de789dac072b3eff81eb5639254198dc2af1cc12ecc408c3272f8c2c5" exitCode=1 Nov 22 10:40:12 crc kubenswrapper[4926]: I1122 10:40:12.860510 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-z69nr" event={"ID":"25bc94bb-a5d1-431c-9847-2f6a02997e25","Type":"ContainerDied","Data":"c31c877de789dac072b3eff81eb5639254198dc2af1cc12ecc408c3272f8c2c5"} Nov 22 10:40:12 crc kubenswrapper[4926]: I1122 10:40:12.860571 4926 scope.go:117] "RemoveContainer" containerID="ffc0bf6be79f2bd37f0c4d453c60991c96473d765cdb8012051321114120f194" Nov 22 10:40:12 crc kubenswrapper[4926]: I1122 10:40:12.861586 4926 scope.go:117] "RemoveContainer" containerID="c31c877de789dac072b3eff81eb5639254198dc2af1cc12ecc408c3272f8c2c5" Nov 22 10:40:12 crc kubenswrapper[4926]: E1122 10:40:12.861842 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 10s restarting failed container=ovnkube-controller pod=ovnkube-node-z69nr_openshift-ovn-kubernetes(25bc94bb-a5d1-431c-9847-2f6a02997e25)\"" pod="openshift-ovn-kubernetes/ovnkube-node-z69nr" podUID="25bc94bb-a5d1-431c-9847-2f6a02997e25" Nov 22 10:40:12 crc kubenswrapper[4926]: I1122 10:40:12.873236 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:12 crc kubenswrapper[4926]: I1122 10:40:12.873296 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:12 crc kubenswrapper[4926]: I1122 10:40:12.873306 4926 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Nov 22 10:40:12 crc kubenswrapper[4926]: I1122 10:40:12.873325 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:12 crc kubenswrapper[4926]: I1122 10:40:12.873338 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:12Z","lastTransitionTime":"2025-11-22T10:40:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:12 crc kubenswrapper[4926]: I1122 10:40:12.885636 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1f3bdd4c-e5c5-46f1-8f92-450d892ef2e2\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b68f6c08d1ac65e51054d798dcb1e6affbf5f114ea3848b544d3093168e44150\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://adceb29cfdba4f70773eb38f81b2291080a8a25e6d6ffe7fbbb037118e0ac29f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/
lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://29acb771444281287208f7c5aecfe78ed6e70fe2940be31d423c011878bee6c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c37c77ac89f0469ff68b3d5dffc2af7d7b6997b8975abe244e302baf0f5bbacc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ed65ad0463b5aa72d13756891af20691bff705a94c67a55d45f71af2a88b01e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2f4c96d7281eda14ad3971ee27f9f0f401d12ba8b72e32b65bd9d4935182f980\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2f4c96d7281eda14ad3971ee27f9f0f401d12ba8b72e32b65bd9d4935182f980\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:39:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:39:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"conta
inerID\\\":\\\"cri-o://ebad6d17a869f5f58a7c43300ceace96025b58ca9621cb6bc23b45d69fe471b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ebad6d17a869f5f58a7c43300ceace96025b58ca9621cb6bc23b45d69fe471b4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:39:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:39:42Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://6075bb148352742b787b1e8df58fe76d06db89303a7c297d07446148532e92cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6075bb148352742b787b1e8df58fe76d06db89303a7c297d07446148532e92cb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:39:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:39:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:39:40Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:12Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:12 crc kubenswrapper[4926]: I1122 10:40:12.900612 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://20ee112074738980326d231b1680aa74c920ec797dbfe4752ebfdfb99695caeb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0a5e6c7d3f4dd4bbfdc58c50672c5187d32b136fb9e096c95e786193e1c773b9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:12Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:12 crc kubenswrapper[4926]: I1122 10:40:12.918812 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-z69nr" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"25bc94bb-a5d1-431c-9847-2f6a02997e25\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://20393151c13ddc03e23c32092c70e09c99527d0dd7554c02fc9d2ef8f6e38ba4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://877601ea3a274df873ed3165ae9f9e407093527a55ca10bb9a1c6092312c1b58\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ecaafcef7f92f85be04341712725da108f6dc00c79d4fb72f683186ccaa7c7d7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f2e1c170bdf63c83df5049fbe46ef4de6e08838e961ed3c16904cb9572fdf52f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4dd9889bb8600e958757f70ba92e2e7742f9f4e8ce9bf9c11dc4a2fd4ba0aaa2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f647a7091a4d0acbf116e2583851b13ec4415004447202e892f2b24b7acf5e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c31c877de789dac072b3eff81eb5639254198dc2af1cc12ecc408c3272f8c2c5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ffc0bf6be79f2bd37f0c4d453c60991c96473d765cdb8012051321114120f194\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-22T10:40:10Z\\\",\\\"message\\\":\\\"oval\\\\nI1122 10:40:10.313331 6165 handler.go:190] Sending *v1.Pod event handler 3 for removal\\\\nI1122 10:40:10.313336 6165 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI1122 10:40:10.313363 6165 handler.go:208] Removed *v1.Pod event handler 6\\\\nI1122 10:40:10.313531 6165 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI1122 10:40:10.313538 6165 handler.go:208] Removed *v1.Pod event handler 3\\\\nI1122 10:40:10.313534 6165 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1122 10:40:10.313555 6165 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1122 10:40:10.313595 6165 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1122 10:40:10.313647 6165 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1122 10:40:10.313690 6165 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1122 10:40:10.313722 6165 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1122 10:40:10.313737 6165 handler.go:208] Removed *v1.Node event handler 2\\\\nI1122 10:40:10.313796 6165 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1122 10:40:10.313849 6165 factory.go:656] Stopping watch factory\\\\nI1122 10:40:10.313891 6165 ovnkube.go:599] Stopped ovnkube\\\\nI1122 10:40:10.313806 6165 handler.go:208] Removed *v1.Node event handler 7\\\\nI1122 10:40:1\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:06Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c31c877de789dac072b3eff81eb5639254198dc2af1cc12ecc408c3272f8c2c5\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-22T10:40:11Z\\\",\\\"message\\\":\\\"to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post 
\\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:11Z is after 2025-08-24T17:21:41Z]\\\\nI1122 10:40:11.880425 6326 services_controller.go:473] Services do not match for network=default, existing lbs: []services.LB{services.LB{Name:\\\\\\\"Service_openshift-machine-api/machine-api-operator-webhook_TCP_cluster\\\\\\\", UUID:\\\\\\\"e4e4203e-87c7-4024-930a-5d6bdfe2bdde\\\\\\\", Protocol:\\\\\\\"tcp\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-machine-api/machine-api-operator-webhook\\\\\\\"}, Opts:services.LBOpts{Reject:false, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{}, Templates:services.TemplateMap{}, Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}, built lbs: []services.LB{services.LB{Name:\\\\\\\"Service_openshift-machine-api/machine-api-operator-webhook_TCP_cluster\\\\\\\", UUID:\\\\\\\"\\\\\\\", Protocol:\\\\\\\"TCP\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-machine-api/machine-api-operator-webhook\\\\\\\"}, Opts:services.LBOpts{Reject:true, EmptyLBEvents:false, AffinityTi\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"cont
ainerID\\\":\\\"cri-o://19bceeb84e2cce918b75507fb0f9d5e4365fcd0d30da4491b83ed54b3c2d369a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fde011e559f073f261a07a3e55c05d88d1b11a16b6858b74cb8ef79b97c266b1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fde011e559f073f261a07a3e55c05d88d1b11a16b6858b74cb8ef79b97c266b1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:00Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-z69nr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:12Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:12 crc kubenswrapper[4926]: I1122 10:40:12.931484 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://136423f9eeb872be0b685dcfee2a81e5f4133ad09607dc53fdfa64d70e87ceab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:12Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:12 crc kubenswrapper[4926]: I1122 10:40:12.946658 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-cqsd2"] Nov 22 10:40:12 crc kubenswrapper[4926]: I1122 10:40:12.947130 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-cqsd2" Nov 22 10:40:12 crc kubenswrapper[4926]: I1122 10:40:12.949715 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-c6w2q" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"36de2843-6491-4c54-b624-c4a3d328c164\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://13dff3bd18b5adc45fb92f8207a6726011752f430271dabbae9f2e94011f1b08\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-52vzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\
\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:00Z\\\"}}\" for pod \"openshift-multus\"/\"multus-c6w2q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:12Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:12 crc kubenswrapper[4926]: I1122 10:40:12.952200 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-control-plane-dockercfg-gs7dd" Nov 22 10:40:12 crc kubenswrapper[4926]: I1122 10:40:12.952214 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-control-plane-metrics-cert" Nov 22 10:40:12 crc kubenswrapper[4926]: I1122 10:40:12.964130 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-4sppr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f66b576d-83a6-4919-a918-7b075f35881e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a4a164ad2484a9c0b09c8e6b174448dedfe84a017ada69b19429d90621540080\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jwtrf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:01Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-4sppr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: 
certificate has expired or is not yet valid: current time 2025-11-22T10:40:12Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:12 crc kubenswrapper[4926]: I1122 10:40:12.978159 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/40323bfa-0937-4581-a5f5-bbfe1de066eb-env-overrides\") pod \"ovnkube-control-plane-749d76644c-cqsd2\" (UID: \"40323bfa-0937-4581-a5f5-bbfe1de066eb\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-cqsd2" Nov 22 10:40:12 crc kubenswrapper[4926]: I1122 10:40:12.978229 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/40323bfa-0937-4581-a5f5-bbfe1de066eb-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-749d76644c-cqsd2\" (UID: \"40323bfa-0937-4581-a5f5-bbfe1de066eb\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-cqsd2" Nov 22 10:40:12 crc kubenswrapper[4926]: I1122 10:40:12.978254 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w5t6w\" (UniqueName: \"kubernetes.io/projected/40323bfa-0937-4581-a5f5-bbfe1de066eb-kube-api-access-w5t6w\") pod \"ovnkube-control-plane-749d76644c-cqsd2\" (UID: \"40323bfa-0937-4581-a5f5-bbfe1de066eb\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-cqsd2" Nov 22 10:40:12 crc kubenswrapper[4926]: I1122 10:40:12.978290 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/40323bfa-0937-4581-a5f5-bbfe1de066eb-ovnkube-config\") pod \"ovnkube-control-plane-749d76644c-cqsd2\" (UID: \"40323bfa-0937-4581-a5f5-bbfe1de066eb\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-cqsd2" Nov 22 10:40:12 crc kubenswrapper[4926]: I1122 10:40:12.980081 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"19e2ae79-41cf-4653-aa92-cc410145852b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5b4679473c2c11fecbd8496e1cbeb15b3a2e3fff5e5b5f127e0d1f1d7b3e5b70\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8066b411b165bb6f3aa9fac7c4f7d5687e58fba56ff2afd14c7a351ba3877483\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://96a716433f83f8041fc5f29383937a1b49d73701286739632a5c07a7e4a42077\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ff433d4efb9f77a47f79a39aa609b09a40a642f554e19b8fce0c84ad96a5e34\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:39:40Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:12Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:12 crc kubenswrapper[4926]: I1122 10:40:12.980375 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:12 crc kubenswrapper[4926]: I1122 10:40:12.980418 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:12 crc kubenswrapper[4926]: I1122 10:40:12.980432 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:12 crc kubenswrapper[4926]: I1122 10:40:12.980450 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:12 crc kubenswrapper[4926]: I1122 10:40:12.980464 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:12Z","lastTransitionTime":"2025-11-22T10:40:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:40:12 crc kubenswrapper[4926]: I1122 10:40:12.991321 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-wqf9b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"385427b5-fb45-42dd-8ff8-a4c7cdad6157\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://06207f52a9095cf1830a6ad91952c7efa774aa15a2fca873dc09b6b995e26401\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4rrk8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:00Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-wqf9b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:12Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:13 crc kubenswrapper[4926]: I1122 10:40:13.008154 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2bff8ee1644ff7cd9bee279e5c641b5d9c55e0546fbc7d1f2330e4e86aee0c37\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:13Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:13 crc kubenswrapper[4926]: I1122 10:40:13.027284 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was 
deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:13Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:13 crc kubenswrapper[4926]: I1122 10:40:13.045645 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:13Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:13 crc kubenswrapper[4926]: I1122 10:40:13.069834 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8bc26a84-5939-4708-97f7-36fb5b1d1f27\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:40Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:40Z\\\",\\\"message\\\":\\\"containers with unready status: 
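The patch bodies being rejected are Kubernetes strategic merge patches against the pod /status subresource: the "$setElementOrder/conditions" directive pins the order of the type-keyed conditions list, while "conditions" carries only the entries that changed. A sketch of that shape, with field values abbreviated from the network-check-target-xd92c entry above:

import json

patch = {
    "metadata": {"uid": "3b6479f0-333b-4a96-9adf-2099afdc2447"},
    "status": {
        # Directive: keep the conditions list in this order after the merge.
        "$setElementOrder/conditions": [
            {"type": "PodReadyToStartContainers"},
            {"type": "Initialized"},
            {"type": "Ready"},
            {"type": "ContainersReady"},
            {"type": "PodScheduled"},
        ],
        # Only changed entries are sent; they merge into the list by "type".
        "conditions": [
            {"type": "Ready", "status": "False", "reason": "ContainersNotReady",
             "message": "containers with unready status: [network-check-target-container]"},
        ],
    },
}

# kubelet sends this as a PATCH with content type
# application/strategic-merge-patch+json.
print(json.dumps(patch, indent=2))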
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bdc617bc3d316106b3e66df8521ae9165a90df081b92c4a6c167895006a667a4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8bdb9d01c359b36d5ea1517197ab1db0746267b371b5aaf6ae9af7157af6f76\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f92968d6a164ebcee515ead5d8704ab04430b65f29cec21c10cecf2b99079737\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e4845e8b99f2def354e4a055d407495a578db4994aef6fb1c70b91760bbf4e2a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e4845e8b99f2def354e4a055d407495a578db4994aef6fb1c70b91760bbf4e2a\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"message\\\":\\\"le observer\\\\nW1122 10:40:01.221686 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1122 10:40:01.221835 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1122 10:40:01.223195 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3742639324/tls.crt::/tmp/serving-cert-3742639324/tls.key\\\\\\\"\\\\nI1122 10:40:01.845350 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1122 10:40:01.848213 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1122 10:40:01.848231 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1122 10:40:01.848251 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1122 10:40:01.848258 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1122 10:40:01.855383 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1122 10:40:01.855412 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1122 10:40:01.855418 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1122 10:40:01.855423 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nI1122 10:40:01.855441 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1122 10:40:01.855456 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1122 10:40:01.855461 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1122 10:40:01.855464 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1122 10:40:01.857125 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T10:39:55Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://59001d5969924d417da69f7b385e8ee904bb5289914c6f4a58156a80f2553abd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:43Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e4fdad471fe7d7332fa8c1fc1e4779b934b140f9c808e0af44f05808c56b3e42\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e4fdad471fe7d7332fa8c1fc1e4779b934b140f9c808e0af44f05808c56b3e42\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:39:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:39:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:39:40Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:13Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:13 crc kubenswrapper[4926]: I1122 10:40:13.079044 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/40323bfa-0937-4581-a5f5-bbfe1de066eb-env-overrides\") pod \"ovnkube-control-plane-749d76644c-cqsd2\" (UID: \"40323bfa-0937-4581-a5f5-bbfe1de066eb\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-cqsd2" Nov 22 10:40:13 crc kubenswrapper[4926]: I1122 10:40:13.079130 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w5t6w\" (UniqueName: \"kubernetes.io/projected/40323bfa-0937-4581-a5f5-bbfe1de066eb-kube-api-access-w5t6w\") pod \"ovnkube-control-plane-749d76644c-cqsd2\" (UID: \"40323bfa-0937-4581-a5f5-bbfe1de066eb\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-cqsd2" Nov 22 10:40:13 crc kubenswrapper[4926]: I1122 10:40:13.079157 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
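The CrashLoopBackOff state recorded above ("back-off 10s restarting failed container=kube-apiserver-check-endpoints") reflects kubelet's default restart backoff: an initial 10-second delay that doubles per consecutive failed restart, capped at 5 minutes, and reset after the container runs cleanly for long enough. A sketch of that schedule under those upstream defaults (the exact restart times also depend on when the backoff entry is reset):

BASE_SECONDS = 10    # kubelet's initial crash-loop delay
CAP_SECONDS = 300    # capped at 5 minutes

def crashloop_delay(failed_restarts: int) -> int:
    """Delay before the next restart attempt after N consecutive failures."""
    return min(BASE_SECONDS * 2 ** failed_restarts, CAP_SECONDS)

for n in range(7):
    print(f"after {n} failures: wait {crashloop_delay(n)}s")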
\"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/40323bfa-0937-4581-a5f5-bbfe1de066eb-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-749d76644c-cqsd2\" (UID: \"40323bfa-0937-4581-a5f5-bbfe1de066eb\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-cqsd2" Nov 22 10:40:13 crc kubenswrapper[4926]: I1122 10:40:13.079466 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/40323bfa-0937-4581-a5f5-bbfe1de066eb-ovnkube-config\") pod \"ovnkube-control-plane-749d76644c-cqsd2\" (UID: \"40323bfa-0937-4581-a5f5-bbfe1de066eb\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-cqsd2" Nov 22 10:40:13 crc kubenswrapper[4926]: I1122 10:40:13.080445 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/40323bfa-0937-4581-a5f5-bbfe1de066eb-ovnkube-config\") pod \"ovnkube-control-plane-749d76644c-cqsd2\" (UID: \"40323bfa-0937-4581-a5f5-bbfe1de066eb\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-cqsd2" Nov 22 10:40:13 crc kubenswrapper[4926]: I1122 10:40:13.080500 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/40323bfa-0937-4581-a5f5-bbfe1de066eb-env-overrides\") pod \"ovnkube-control-plane-749d76644c-cqsd2\" (UID: \"40323bfa-0937-4581-a5f5-bbfe1de066eb\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-cqsd2" Nov 22 10:40:13 crc kubenswrapper[4926]: I1122 10:40:13.083080 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:13 crc kubenswrapper[4926]: I1122 10:40:13.083180 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:13 crc kubenswrapper[4926]: I1122 10:40:13.083211 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:13 crc kubenswrapper[4926]: I1122 10:40:13.083244 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:13 crc kubenswrapper[4926]: I1122 10:40:13.083267 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:13Z","lastTransitionTime":"2025-11-22T10:40:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:40:13 crc kubenswrapper[4926]: I1122 10:40:13.088787 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/40323bfa-0937-4581-a5f5-bbfe1de066eb-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-749d76644c-cqsd2\" (UID: \"40323bfa-0937-4581-a5f5-bbfe1de066eb\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-cqsd2" Nov 22 10:40:13 crc kubenswrapper[4926]: I1122 10:40:13.091106 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
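The "Node became not ready" condition repeated throughout this window is independent of the webhook problem: kubelet reports NetworkPluginNotReady because no CNI network config exists yet under /etc/kubernetes/cni/net.d/, and the ovn-kubernetes pods that would write one are still coming up in the surrounding entries. A sketch of that readiness check, assuming the conventional .conf/.conflist/.json names that CNI config loaders scan for:

from pathlib import Path

CNI_CONF_DIR = Path("/etc/kubernetes/cni/net.d")  # path named in the log message

def cni_configs(conf_dir: Path = CNI_CONF_DIR) -> list[Path]:
    """Return candidate CNI network configs, sorted by name."""
    if not conf_dir.is_dir():
        return []
    return sorted(p for p in conf_dir.iterdir()
                  if p.suffix in {".conf", ".conflist", ".json"})

configs = cni_configs()
if configs:
    print("network ready, configs:", ", ".join(p.name for p in configs))
else:
    print(f"no CNI configuration file in {CNI_CONF_DIR}/ - node stays NotReady")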
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:13Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:13 crc kubenswrapper[4926]: I1122 10:40:13.109328 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-w5t6w\" (UniqueName: \"kubernetes.io/projected/40323bfa-0937-4581-a5f5-bbfe1de066eb-kube-api-access-w5t6w\") pod \"ovnkube-control-plane-749d76644c-cqsd2\" (UID: \"40323bfa-0937-4581-a5f5-bbfe1de066eb\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-cqsd2" Nov 22 10:40:13 crc kubenswrapper[4926]: I1122 10:40:13.110128 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d4977b14-85c3-4141-9b15-1768f09e8d27\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7f4461b3d09c647d71476b2c089ab47e93fe0e0efb27f8179e476ff3667447f6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube
-api-access-5nwn7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fb54559825bae40317f4dbc23c5b4238c4681f9845053656d59b2d2b3e504e1a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5nwn7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:00Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-xr9nd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:13Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:13 crc kubenswrapper[4926]: I1122 10:40:13.134600 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-sr572" err="failed to patch status 
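Each kubenswrapper record in this log carries a klog header, e.g. "I1122 10:40:13.110128 4926 status_manager.go:875]": a severity letter (I, W, E, or F), the month and day, wall time with microseconds, the process ID, and the emitting source file and line. A small parser for that header, handy when slicing this log by component:

import re

KLOG_HEADER = re.compile(
    r"(?P<sev>[IWEF])(?P<month>\d{2})(?P<day>\d{2})\s+"   # I1122
    r"(?P<time>\d{2}:\d{2}:\d{2}\.\d{6})\s+"              # 10:40:13.110128
    r"(?P<pid>\d+)\s+"                                    # 4926
    r"(?P<source>[\w.]+:\d+)\]\s*(?P<msg>.*)"             # status_manager.go:875]
)

sample = 'I1122 10:40:13.110128 4926 status_manager.go:875] "Failed to update status for pod"'
match = KLOG_HEADER.match(sample)
assert match is not None
print(match.groupdict())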
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"fa95257d-7464-4038-b2f3-aa795e4ac425\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b2dd6b2a2fe640f28f973df40cf7a31e3cba962a0d2aec796c6fcfa3892adce1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0a7d49a66e96b6f535cae7cd657acf64164c67b52df055b133183ef25644863d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0a7d49a66e96b6f535cae7cd657acf64164c67b52df055b133183ef25644863d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://53833aaa04a836cea1e6539fd089b91d92bec1e9c1037d036a09fa83a95b45da\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://53833aaa04a836cea1e6539fd089b91d92bec1e9c1037d036a09fa83a95b45da\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://80eeb7fb332eda9b9472092de548160297c4abdf76a929904fcf3321e992c230\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://80eeb7fb332eda9b9472092de548160297c4abdf76a929904fcf3321e992c230\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:40:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6293e4f193241ddbe2b4e93ceee670330e9e6cf86889fc9b049a7195da496a28\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6293e4f193241ddbe2b4e93ceee670330e9e6cf86889fc9b049a7195da496a28\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:40:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6ffd06bbdeee7c23dde48a03a975b5a86b258d8f95281abd78c8ce459155bb3f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6ffd06bbdeee7c23dde48a03a975b5a86b258d8f95281abd78c8ce459155bb3f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:40:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ff5dbca8ebeaee6449c5eadf548e32151cc59875e405a574c1f2e0677b40bb19\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ff5dbca8ebeaee6449c5eadf548e32151cc59875e405a574c1f2e0677b40bb19\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:40:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:00Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-sr572\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:13Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:13 crc kubenswrapper[4926]: I1122 10:40:13.154837 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
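The initContainerStatuses in the multus-additional-cni-plugins entry above record the strictly sequential init phase: each of the six CNI copy/install containers reaches Completed before the next one's startedAt, which is how Kubernetes runs init containers. A quick consistency check over the timestamps copied from that entry:

from datetime import datetime

RFC3339 = "%Y-%m-%dT%H:%M:%SZ"

# (name, startedAt, finishedAt) triples taken verbatim from the log entry.
INIT_CONTAINERS = [
    ("egress-router-binary-copy", "2025-11-22T10:40:01Z", "2025-11-22T10:40:01Z"),
    ("cni-plugins",               "2025-11-22T10:40:03Z", "2025-11-22T10:40:03Z"),
    ("bond-cni-plugin",           "2025-11-22T10:40:03Z", "2025-11-22T10:40:04Z"),
    ("routeoverride-cni",         "2025-11-22T10:40:04Z", "2025-11-22T10:40:05Z"),
    ("whereabouts-cni-bincopy",   "2025-11-22T10:40:06Z", "2025-11-22T10:40:06Z"),
    ("whereabouts-cni",           "2025-11-22T10:40:07Z", "2025-11-22T10:40:07Z"),
]

prev_finish = None
for name, started, finished in INIT_CONTAINERS:
    s, f = (datetime.strptime(t, RFC3339) for t in (started, finished))
    assert f >= s, name
    if prev_finish is not None:
        # An init container starts only after its predecessor terminated.
        assert s >= prev_finish, f"{name} overlapped its predecessor"
    prev_finish = f
    print(f"{name:26s} {started} -> {finished}")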
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:13Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:13 crc kubenswrapper[4926]: I1122 10:40:13.174828 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-4sppr" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f66b576d-83a6-4919-a918-7b075f35881e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a4a164ad2484a9c0b09c8e6b174448dedfe84a017ada69b19429d90621540080\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jwtrf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:01Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-4sppr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:13Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:13 crc kubenswrapper[4926]: I1122 10:40:13.186055 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:13 crc kubenswrapper[4926]: I1122 10:40:13.186116 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:13 crc kubenswrapper[4926]: I1122 10:40:13.186128 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:13 crc kubenswrapper[4926]: I1122 10:40:13.186151 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:13 crc kubenswrapper[4926]: I1122 10:40:13.186164 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:13Z","lastTransitionTime":"2025-11-22T10:40:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:13 crc kubenswrapper[4926]: I1122 10:40:13.196584 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"19e2ae79-41cf-4653-aa92-cc410145852b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5b4679473c2c11fecbd8496e1cbeb15b3a2e3fff5e5b5f127e0d1f1d7b3e5b70\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8066b411b165bb6f3aa9fac7c4f7d5687e58fba56ff2afd14c7a351ba3877483\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://96a716433f83f8041fc5f29383937a1b49d73701286739632a5c07a7e4a42077\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:42
Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ff433d4efb9f77a47f79a39aa609b09a40a642f554e19b8fce0c84ad96a5e34\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:39:40Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:13Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:13 crc kubenswrapper[4926]: I1122 10:40:13.213116 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-wqf9b" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"385427b5-fb45-42dd-8ff8-a4c7cdad6157\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://06207f52a9095cf1830a6ad91952c7efa774aa15a2fca873dc09b6b995e26401\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4rrk8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:00Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-wqf9b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:13Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:13 crc kubenswrapper[4926]: I1122 10:40:13.232689 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2bff8ee1644ff7cd9bee279e5c641b5d9c55e0546fbc7d1f2330e4e86aee0c37\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:13Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:13 crc kubenswrapper[4926]: I1122 10:40:13.246745 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was 
deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:13Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:13 crc kubenswrapper[4926]: I1122 10:40:13.261505 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-cqsd2" Nov 22 10:40:13 crc kubenswrapper[4926]: W1122 10:40:13.293186 4926 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod40323bfa_0937_4581_a5f5_bbfe1de066eb.slice/crio-9cd6c0d883d0166db78e0aea9daca047528006acb80dc9cee1ca229d9c9bced4 WatchSource:0}: Error finding container 9cd6c0d883d0166db78e0aea9daca047528006acb80dc9cee1ca229d9c9bced4: Status 404 returned error can't find the container with id 9cd6c0d883d0166db78e0aea9daca047528006acb80dc9cee1ca229d9c9bced4 Nov 22 10:40:13 crc kubenswrapper[4926]: I1122 10:40:13.293322 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:13 crc kubenswrapper[4926]: I1122 10:40:13.293339 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:13 crc kubenswrapper[4926]: I1122 10:40:13.293347 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:13 crc kubenswrapper[4926]: I1122 10:40:13.293361 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:13 crc kubenswrapper[4926]: I1122 10:40:13.293369 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:13Z","lastTransitionTime":"2025-11-22T10:40:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:40:13 crc kubenswrapper[4926]: I1122 10:40:13.302032 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-sr572" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fa95257d-7464-4038-b2f3-aa795e4ac425\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b2dd6b2a2fe640f28f973df40cf7a31e3cba962a0d2aec796c6fcfa3892adce1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0a7d49a66e96b6f535cae7cd657acf64164c67b52df055b133183ef25644863d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0a7d49a66e96b6f535cae7cd657acf64164c67b52df055b133183ef25644863d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://53833aaa04a836cea1e6539fd089b91d92bec1e9c1037d036a09fa83a95b45da\
\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://53833aaa04a836cea1e6539fd089b91d92bec1e9c1037d036a09fa83a95b45da\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://80eeb7fb332eda9b9472092de548160297c4abdf76a929904fcf3321e992c230\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://80eeb7fb332eda9b9472092de548160297c4abdf76a929904fcf3321e992c230\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:40:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6293e4f193241ddbe2b4e93ceee670330e9e6cf86889fc9b049a7195da496a28\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6293e4f193241ddbe2b4e93ceee670330e9e6cf86889fc9b049a7195da496a28\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:40:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"
mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6ffd06bbdeee7c23dde48a03a975b5a86b258d8f95281abd78c8ce459155bb3f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6ffd06bbdeee7c23dde48a03a975b5a86b258d8f95281abd78c8ce459155bb3f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:40:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ff5dbca8ebeaee6449c5eadf548e32151cc59875e405a574c1f2e0677b40bb19\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ff5dbca8ebeaee6449c5eadf548e32151cc59875e405a574c1f2e0677b40bb19\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:40:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:00Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-sr572\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:13Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:13 crc kubenswrapper[4926]: I1122 10:40:13.326004 4926 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8bc26a84-5939-4708-97f7-36fb5b1d1f27\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:40Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:40Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bdc617bc3d316106b3e66df8521ae9165a90df081b92c4a6c167895006a667a4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8bdb9d01c359b36d5ea1517197ab1db0746267b371b5aaf6ae9af7157af6f76\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f92968d6a164ebcee515ead5d8704ab04430b65f29cec21c10cecf2b99079737\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc
/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e4845e8b99f2def354e4a055d407495a578db4994aef6fb1c70b91760bbf4e2a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e4845e8b99f2def354e4a055d407495a578db4994aef6fb1c70b91760bbf4e2a\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"message\\\":\\\"le observer\\\\nW1122 10:40:01.221686 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1122 10:40:01.221835 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1122 10:40:01.223195 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3742639324/tls.crt::/tmp/serving-cert-3742639324/tls.key\\\\\\\"\\\\nI1122 10:40:01.845350 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1122 10:40:01.848213 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1122 10:40:01.848231 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1122 10:40:01.848251 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1122 10:40:01.848258 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1122 10:40:01.855383 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1122 10:40:01.855412 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1122 10:40:01.855418 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1122 10:40:01.855423 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nI1122 10:40:01.855441 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1122 10:40:01.855456 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1122 10:40:01.855461 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1122 10:40:01.855464 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1122 10:40:01.857125 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T10:39:55Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://59001d5969924d417da69f7b385e8ee904bb5289914c6f4a58156a80f2553abd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:43Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e4fdad471fe7d7332fa8c1fc1e4779b934b140f9c808e0af44f05808c56b3e42\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e4fdad471fe7d7332fa8c1fc1e4779b934b140f9c808e0af44f05808c56b3e42\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:39:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:39:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:39:40Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:13Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:13 crc kubenswrapper[4926]: I1122 10:40:13.339168 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:13Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:13 crc kubenswrapper[4926]: I1122 10:40:13.353028 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d4977b14-85c3-4141-9b15-1768f09e8d27\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7f4461b3d09c647d71476b2c089ab47e93fe0e0efb27f8179e476ff3667447f6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5nwn7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fb54559825bae40317f4dbc23c5b4238c4681f9845053656d59b2d2b3e504e1a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5nwn7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:00Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-xr9nd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:13Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:13 crc kubenswrapper[4926]: I1122 10:40:13.363996 4926 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-cqsd2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"40323bfa-0937-4581-a5f5-bbfe1de066eb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:12Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:12Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:12Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w5t6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w5t6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:12Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-cqsd2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:13Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:13 crc kubenswrapper[4926]: I1122 10:40:13.382983 4926 
status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1f3bdd4c-e5c5-46f1-8f92-450d892ef2e2\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b68f6c08d1ac65e51054d798dcb1e6affbf5f114ea3848b544d3093168e44150\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://adceb29cfdba4f70773eb38f81b2291080a8a25e6d6ffe7fbbb037118e0ac29f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://29acb771444281287208f7c5aecfe78ed6e70fe2940be31d423c011878bee6c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-
certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c37c77ac89f0469ff68b3d5dffc2af7d7b6997b8975abe244e302baf0f5bbacc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ed65ad0463b5aa72d13756891af20691bff705a94c67a55d45f71af2a88b01e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2f4c96d7281eda14ad3971ee27f9f0f401d12ba8b72e32b65bd9d4935182f980\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2f4c96d7281eda14ad3971ee27f9f0f401d12ba8b72e32b65bd9d4935182f980\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:39:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:39:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ebad6d17a869f5f58a7c43300ceace96025b58ca9621cb6bc23b45d69fe471b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ebad6d17a869f5f58a7c43300ceace96025b58ca9621cb6bc23b45d69fe471b4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:39:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:
39:42Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://6075bb148352742b787b1e8df58fe76d06db89303a7c297d07446148532e92cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6075bb148352742b787b1e8df58fe76d06db89303a7c297d07446148532e92cb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:39:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:39:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:39:40Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:13Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:13 crc kubenswrapper[4926]: I1122 10:40:13.394314 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://20ee112074738980326d231b1680aa74c920ec797dbfe4752ebfdfb99695caeb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0a5e6c7d3f4dd4bbfdc58c50672c5187d32b136fb9e096c95e786193e1c773b9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:13Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:13 crc kubenswrapper[4926]: I1122 10:40:13.395080 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:13 crc kubenswrapper[4926]: I1122 10:40:13.395104 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:13 crc kubenswrapper[4926]: I1122 10:40:13.395115 4926 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Nov 22 10:40:13 crc kubenswrapper[4926]: I1122 10:40:13.395132 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:13 crc kubenswrapper[4926]: I1122 10:40:13.395142 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:13Z","lastTransitionTime":"2025-11-22T10:40:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:13 crc kubenswrapper[4926]: I1122 10:40:13.411172 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-z69nr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"25bc94bb-a5d1-431c-9847-2f6a02997e25\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://20393151c13ddc03e23c32092c70e09c99527d0dd7554c02fc9d2ef8f6e38ba4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://877601ea3a274df873ed3165ae9f9e407093527a55ca10bb9a1c6092312c1b58\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ecaafcef7f92f85be04341712725da108f6dc00c79d4fb72f683186ccaa7c7d7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f2e1c170bdf63c83df5049fbe46ef4de6e08838e961ed3c16904cb9572fdf52f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4dd9889bb8600e958757f70ba92e2e7742f9f4e8ce9bf9c11dc4a2fd4ba0aaa2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f647a7091a4d0acbf116e2583851b13ec4415004447202e892f2b24b7acf5e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c31c877de789dac072b3eff81eb5639254198dc2
af1cc12ecc408c3272f8c2c5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ffc0bf6be79f2bd37f0c4d453c60991c96473d765cdb8012051321114120f194\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-22T10:40:10Z\\\",\\\"message\\\":\\\"oval\\\\nI1122 10:40:10.313331 6165 handler.go:190] Sending *v1.Pod event handler 3 for removal\\\\nI1122 10:40:10.313336 6165 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI1122 10:40:10.313363 6165 handler.go:208] Removed *v1.Pod event handler 6\\\\nI1122 10:40:10.313531 6165 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI1122 10:40:10.313538 6165 handler.go:208] Removed *v1.Pod event handler 3\\\\nI1122 10:40:10.313534 6165 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1122 10:40:10.313555 6165 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1122 10:40:10.313595 6165 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1122 10:40:10.313647 6165 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1122 10:40:10.313690 6165 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1122 10:40:10.313722 6165 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1122 10:40:10.313737 6165 handler.go:208] Removed *v1.Node event handler 2\\\\nI1122 10:40:10.313796 6165 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1122 10:40:10.313849 6165 factory.go:656] Stopping watch factory\\\\nI1122 10:40:10.313891 6165 ovnkube.go:599] Stopped ovnkube\\\\nI1122 10:40:10.313806 6165 handler.go:208] Removed *v1.Node event handler 7\\\\nI1122 10:40:1\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:06Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c31c877de789dac072b3eff81eb5639254198dc2af1cc12ecc408c3272f8c2c5\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-22T10:40:11Z\\\",\\\"message\\\":\\\"to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:11Z is after 2025-08-24T17:21:41Z]\\\\nI1122 10:40:11.880425 6326 services_controller.go:473] Services do not match for network=default, existing lbs: []services.LB{services.LB{Name:\\\\\\\"Service_openshift-machine-api/machine-api-operator-webhook_TCP_cluster\\\\\\\", UUID:\\\\\\\"e4e4203e-87c7-4024-930a-5d6bdfe2bdde\\\\\\\", Protocol:\\\\\\\"tcp\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-machine-api/machine-api-operator-webhook\\\\\\\"}, Opts:services.LBOpts{Reject:false, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{}, Templates:services.TemplateMap{}, Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}, built lbs: 
[]services.LB{services.LB{Name:\\\\\\\"Service_openshift-machine-api/machine-api-operator-webhook_TCP_cluster\\\\\\\", UUID:\\\\\\\"\\\\\\\", Protocol:\\\\\\\"TCP\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-machine-api/machine-api-operator-webhook\\\\\\\"}, Opts:services.LBOpts{Reject:true, EmptyLBEvents:false, AffinityTi\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://19bceeb84e2cce918b75507fb0f9d5e4365fcd0d30da4491b83ed54b3c2d369a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\
\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fde011e559f073f261a07a3e55c05d88d1b11a16b6858b74cb8ef79b97c266b1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fde011e559f073f261a07a3e55c05d88d1b11a16b6858b74cb8ef79b97c266b1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:00Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-z69nr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:13Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:13 crc kubenswrapper[4926]: I1122 10:40:13.421340 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://136423f9eeb872be0b685dcfee2a81e5f4133ad09607dc53fdfa64d70e87ceab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:13Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:13 crc kubenswrapper[4926]: I1122 10:40:13.432537 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-c6w2q" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"36de2843-6491-4c54-b624-c4a3d328c164\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://13dff3bd18b5adc45fb92f8207a6726011752f430271dabbae9f2e94011f1b08\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-52vzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:00Z\\\"}}\" for pod \"openshift-multus\"/\"multus-c6w2q\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:13Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:13 crc kubenswrapper[4926]: I1122 10:40:13.496854 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:13 crc kubenswrapper[4926]: I1122 10:40:13.496903 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:13 crc kubenswrapper[4926]: I1122 10:40:13.496911 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:13 crc kubenswrapper[4926]: I1122 10:40:13.496927 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:13 crc kubenswrapper[4926]: I1122 10:40:13.496936 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:13Z","lastTransitionTime":"2025-11-22T10:40:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:13 crc kubenswrapper[4926]: I1122 10:40:13.581247 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 22 10:40:13 crc kubenswrapper[4926]: E1122 10:40:13.581387 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 22 10:40:13 crc kubenswrapper[4926]: I1122 10:40:13.599213 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:13 crc kubenswrapper[4926]: I1122 10:40:13.599286 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:13 crc kubenswrapper[4926]: I1122 10:40:13.599299 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:13 crc kubenswrapper[4926]: I1122 10:40:13.599315 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:13 crc kubenswrapper[4926]: I1122 10:40:13.599328 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:13Z","lastTransitionTime":"2025-11-22T10:40:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:40:13 crc kubenswrapper[4926]: I1122 10:40:13.702155 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:13 crc kubenswrapper[4926]: I1122 10:40:13.702193 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:13 crc kubenswrapper[4926]: I1122 10:40:13.702202 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:13 crc kubenswrapper[4926]: I1122 10:40:13.702216 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:13 crc kubenswrapper[4926]: I1122 10:40:13.702225 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:13Z","lastTransitionTime":"2025-11-22T10:40:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:13 crc kubenswrapper[4926]: I1122 10:40:13.804518 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:13 crc kubenswrapper[4926]: I1122 10:40:13.804582 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:13 crc kubenswrapper[4926]: I1122 10:40:13.804594 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:13 crc kubenswrapper[4926]: I1122 10:40:13.804615 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:13 crc kubenswrapper[4926]: I1122 10:40:13.804628 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:13Z","lastTransitionTime":"2025-11-22T10:40:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:40:13 crc kubenswrapper[4926]: I1122 10:40:13.866538 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-cqsd2" event={"ID":"40323bfa-0937-4581-a5f5-bbfe1de066eb","Type":"ContainerStarted","Data":"2335c0518813a704a36b683366f4b63c953afc0187416234b6682532298f80ef"} Nov 22 10:40:13 crc kubenswrapper[4926]: I1122 10:40:13.866629 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-cqsd2" event={"ID":"40323bfa-0937-4581-a5f5-bbfe1de066eb","Type":"ContainerStarted","Data":"ed82455ff9e2721a5688857be775b66c293c17713ae1ce362da8eca9a0c52c24"} Nov 22 10:40:13 crc kubenswrapper[4926]: I1122 10:40:13.866657 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-cqsd2" event={"ID":"40323bfa-0937-4581-a5f5-bbfe1de066eb","Type":"ContainerStarted","Data":"9cd6c0d883d0166db78e0aea9daca047528006acb80dc9cee1ca229d9c9bced4"} Nov 22 10:40:13 crc kubenswrapper[4926]: I1122 10:40:13.869029 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-z69nr_25bc94bb-a5d1-431c-9847-2f6a02997e25/ovnkube-controller/1.log" Nov 22 10:40:13 crc kubenswrapper[4926]: I1122 10:40:13.874349 4926 scope.go:117] "RemoveContainer" containerID="c31c877de789dac072b3eff81eb5639254198dc2af1cc12ecc408c3272f8c2c5" Nov 22 10:40:13 crc kubenswrapper[4926]: E1122 10:40:13.874612 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 10s restarting failed container=ovnkube-controller pod=ovnkube-node-z69nr_openshift-ovn-kubernetes(25bc94bb-a5d1-431c-9847-2f6a02997e25)\"" pod="openshift-ovn-kubernetes/ovnkube-node-z69nr" podUID="25bc94bb-a5d1-431c-9847-2f6a02997e25" Nov 22 10:40:13 crc kubenswrapper[4926]: I1122 10:40:13.885486 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:13Z is after 2025-08-24T17:21:41Z"
Nov 22 10:40:13 crc kubenswrapper[4926]: I1122 10:40:13.899097 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d4977b14-85c3-4141-9b15-1768f09e8d27\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7f4461b3d09c647d71476b2c089ab47e93fe0e0efb27f8179e476ff3667447f6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5nwn7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fb54559825bae40317f4dbc23c5b4238c4681f9845053656d59b2d2b3e504e1a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae3
4a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5nwn7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:00Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-xr9nd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:13Z is after 2025-08-24T17:21:41Z"
Nov 22 10:40:13 crc kubenswrapper[4926]: I1122 10:40:13.907501 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 10:40:13 crc kubenswrapper[4926]: I1122 10:40:13.907576 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 10:40:13 crc kubenswrapper[4926]: I1122 10:40:13.907602 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 10:40:13 crc kubenswrapper[4926]: I1122 10:40:13.907634 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 10:40:13 crc kubenswrapper[4926]: I1122 10:40:13.907661 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:13Z","lastTransitionTime":"2025-11-22T10:40:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/.
Has your network provider started?"} Nov 22 10:40:13 crc kubenswrapper[4926]: I1122 10:40:13.922048 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-sr572" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fa95257d-7464-4038-b2f3-aa795e4ac425\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b2dd6b2a2fe640f28f973df40cf7a31e3cba962a0d2aec796c6fcfa3892adce1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0a7d49a66e96b6f535cae7cd657acf64164c67b52df055b133183ef25644863d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0a7d49a66e96b6f535cae7cd657acf64164c67b52df055b133183ef25644863d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://53833aaa04a836cea1e6539fd089b91d92bec1e9c1037d036a09fa83a95b45da\
\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://53833aaa04a836cea1e6539fd089b91d92bec1e9c1037d036a09fa83a95b45da\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://80eeb7fb332eda9b9472092de548160297c4abdf76a929904fcf3321e992c230\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://80eeb7fb332eda9b9472092de548160297c4abdf76a929904fcf3321e992c230\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:40:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6293e4f193241ddbe2b4e93ceee670330e9e6cf86889fc9b049a7195da496a28\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6293e4f193241ddbe2b4e93ceee670330e9e6cf86889fc9b049a7195da496a28\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:40:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"
mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6ffd06bbdeee7c23dde48a03a975b5a86b258d8f95281abd78c8ce459155bb3f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6ffd06bbdeee7c23dde48a03a975b5a86b258d8f95281abd78c8ce459155bb3f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:40:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ff5dbca8ebeaee6449c5eadf548e32151cc59875e405a574c1f2e0677b40bb19\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ff5dbca8ebeaee6449c5eadf548e32151cc59875e405a574c1f2e0677b40bb19\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:40:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:00Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-sr572\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:13Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:13 crc kubenswrapper[4926]: I1122 10:40:13.935714 4926 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8bc26a84-5939-4708-97f7-36fb5b1d1f27\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:40Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:40Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bdc617bc3d316106b3e66df8521ae9165a90df081b92c4a6c167895006a667a4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8bdb9d01c359b36d5ea1517197ab1db0746267b371b5aaf6ae9af7157af6f76\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f92968d6a164ebcee515ead5d8704ab04430b65f29cec21c10cecf2b99079737\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc
/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e4845e8b99f2def354e4a055d407495a578db4994aef6fb1c70b91760bbf4e2a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e4845e8b99f2def354e4a055d407495a578db4994aef6fb1c70b91760bbf4e2a\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"message\\\":\\\"le observer\\\\nW1122 10:40:01.221686 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1122 10:40:01.221835 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1122 10:40:01.223195 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3742639324/tls.crt::/tmp/serving-cert-3742639324/tls.key\\\\\\\"\\\\nI1122 10:40:01.845350 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1122 10:40:01.848213 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1122 10:40:01.848231 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1122 10:40:01.848251 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1122 10:40:01.848258 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1122 10:40:01.855383 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1122 10:40:01.855412 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1122 10:40:01.855418 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1122 10:40:01.855423 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nI1122 10:40:01.855441 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1122 10:40:01.855456 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1122 10:40:01.855461 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1122 10:40:01.855464 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1122 10:40:01.857125 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T10:39:55Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://59001d5969924d417da69f7b385e8ee904bb5289914c6f4a58156a80f2553abd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:43Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e4fdad471fe7d7332fa8c1fc1e4779b934b140f9c808e0af44f05808c56b3e42\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e4fdad471fe7d7332fa8c1fc1e4779b934b140f9c808e0af44f05808c56b3e42\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:39:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:39:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:39:40Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:13Z is after 2025-08-24T17:21:41Z"
Nov 22 10:40:13 crc kubenswrapper[4926]: I1122 10:40:13.948110 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://20ee112074738980326d231b1680aa74c920ec797dbfe4752ebfdfb99695caeb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0a5e6c7d3f4dd4bbfdc58c50672c5187d32b136fb9e096c95e786193e1c773b9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:13Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:13 crc kubenswrapper[4926]: I1122 10:40:13.968312 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-z69nr" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"25bc94bb-a5d1-431c-9847-2f6a02997e25\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://20393151c13ddc03e23c32092c70e09c99527d0dd7554c02fc9d2ef8f6e38ba4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://877601ea3a274df873ed3165ae9f9e407093527a55ca10bb9a1c6092312c1b58\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ecaafcef7f92f85be04341712725da108f6dc00c79d4fb72f683186ccaa7c7d7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f2e1c170bdf63c83df5049fbe46ef4de6e08838e961ed3c16904cb9572fdf52f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4dd9889bb8600e958757f70ba92e2e7742f9f4e8ce9bf9c11dc4a2fd4ba0aaa2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f647a7091a4d0acbf116e2583851b13ec4415004447202e892f2b24b7acf5e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c31c877de789dac072b3eff81eb5639254198dc2af1cc12ecc408c3272f8c2c5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ffc0bf6be79f2bd37f0c4d453c60991c96473d765cdb8012051321114120f194\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-22T10:40:10Z\\\",\\\"message\\\":\\\"oval\\\\nI1122 10:40:10.313331 6165 handler.go:190] Sending *v1.Pod event handler 3 for removal\\\\nI1122 10:40:10.313336 6165 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI1122 10:40:10.313363 6165 handler.go:208] Removed *v1.Pod event handler 6\\\\nI1122 10:40:10.313531 6165 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI1122 10:40:10.313538 6165 handler.go:208] Removed *v1.Pod event handler 3\\\\nI1122 10:40:10.313534 6165 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1122 10:40:10.313555 6165 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1122 10:40:10.313595 6165 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1122 10:40:10.313647 6165 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1122 10:40:10.313690 6165 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1122 10:40:10.313722 6165 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1122 10:40:10.313737 6165 handler.go:208] Removed *v1.Node event handler 2\\\\nI1122 10:40:10.313796 6165 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1122 10:40:10.313849 6165 factory.go:656] Stopping watch factory\\\\nI1122 10:40:10.313891 6165 ovnkube.go:599] Stopped ovnkube\\\\nI1122 10:40:10.313806 6165 handler.go:208] Removed *v1.Node event handler 7\\\\nI1122 10:40:1\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:06Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c31c877de789dac072b3eff81eb5639254198dc2af1cc12ecc408c3272f8c2c5\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-22T10:40:11Z\\\",\\\"message\\\":\\\"to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post 
\\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:11Z is after 2025-08-24T17:21:41Z]\\\\nI1122 10:40:11.880425 6326 services_controller.go:473] Services do not match for network=default, existing lbs: []services.LB{services.LB{Name:\\\\\\\"Service_openshift-machine-api/machine-api-operator-webhook_TCP_cluster\\\\\\\", UUID:\\\\\\\"e4e4203e-87c7-4024-930a-5d6bdfe2bdde\\\\\\\", Protocol:\\\\\\\"tcp\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-machine-api/machine-api-operator-webhook\\\\\\\"}, Opts:services.LBOpts{Reject:false, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{}, Templates:services.TemplateMap{}, Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}, built lbs: []services.LB{services.LB{Name:\\\\\\\"Service_openshift-machine-api/machine-api-operator-webhook_TCP_cluster\\\\\\\", UUID:\\\\\\\"\\\\\\\", Protocol:\\\\\\\"TCP\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-machine-api/machine-api-operator-webhook\\\\\\\"}, Opts:services.LBOpts{Reject:true, EmptyLBEvents:false, AffinityTi\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"cont
ainerID\\\":\\\"cri-o://19bceeb84e2cce918b75507fb0f9d5e4365fcd0d30da4491b83ed54b3c2d369a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fde011e559f073f261a07a3e55c05d88d1b11a16b6858b74cb8ef79b97c266b1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fde011e559f073f261a07a3e55c05d88d1b11a16b6858b74cb8ef79b97c266b1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:00Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-z69nr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:13Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:13 crc kubenswrapper[4926]: I1122 10:40:13.980920 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-cqsd2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"40323bfa-0937-4581-a5f5-bbfe1de066eb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ed82455ff9e2721a5688857be775b66c293c17713ae1ce362da8eca9a0c52c24\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w5t6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2335c0518813a704a36b683366f4b63c953afc0187416234b6682532298f80ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w5t6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:12Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-cqsd2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:13Z is after 2025-08-24T17:21:41Z" Nov 22 
10:40:14 crc kubenswrapper[4926]: I1122 10:40:14.001432 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1f3bdd4c-e5c5-46f1-8f92-450d892ef2e2\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b68f6c08d1ac65e51054d798dcb1e6affbf5f114ea3848b544d3093168e44150\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://adceb29cfdba4f70773eb38f81b2291080a8a25e6d6ffe7fbbb037118e0ac29f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://29acb771444281287208f7c5aecfe78ed6e70fe2940be31d423c011878bee6c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"lo
g-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c37c77ac89f0469ff68b3d5dffc2af7d7b6997b8975abe244e302baf0f5bbacc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ed65ad0463b5aa72d13756891af20691bff705a94c67a55d45f71af2a88b01e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2f4c96d7281eda14ad3971ee27f9f0f401d12ba8b72e32b65bd9d4935182f980\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2f4c96d7281eda14ad3971ee27f9f0f401d12ba8b72e32b65bd9d4935182f980\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:39:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:39:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ebad6d17a869f5f58a7c43300ceace96025b58ca9621cb6bc23b45d69fe471b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ebad6d17a869f5f58a7c43300ceace96025b58ca9621cb6bc23b45d69fe471b4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:39:42Z\\\",\\\"reas
on\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:39:42Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://6075bb148352742b787b1e8df58fe76d06db89303a7c297d07446148532e92cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6075bb148352742b787b1e8df58fe76d06db89303a7c297d07446148532e92cb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:39:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:39:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:39:40Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:13Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:14 crc kubenswrapper[4926]: I1122 10:40:14.010701 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:14 crc kubenswrapper[4926]: I1122 10:40:14.010758 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:14 crc kubenswrapper[4926]: I1122 10:40:14.010780 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:14 crc kubenswrapper[4926]: I1122 10:40:14.010808 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:14 crc kubenswrapper[4926]: I1122 10:40:14.010829 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:14Z","lastTransitionTime":"2025-11-22T10:40:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:40:14 crc kubenswrapper[4926]: I1122 10:40:14.015377 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://136423f9eeb872be0b685dcfee2a81e5f4133ad09607dc53fdfa64d70e87ceab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:14Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:14 crc kubenswrapper[4926]: I1122 10:40:14.030078 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-c6w2q" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"36de2843-6491-4c54-b624-c4a3d328c164\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://13dff3bd18b5adc45fb92f8207a6726011752f430271dabbae9f2e94011f1b08\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-52vzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:00Z\\\"}}\" for pod \"openshift-multus\"/\"multus-c6w2q\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:14Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:14 crc kubenswrapper[4926]: I1122 10:40:14.042931 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2bff8ee1644ff7cd9bee279e5c641b5d9c55e0546fbc7d1f2330e4e86aee0c37\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:14Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:14 crc kubenswrapper[4926]: I1122 10:40:14.060262 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:14Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:14 crc kubenswrapper[4926]: I1122 10:40:14.074956 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:14Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:14 crc kubenswrapper[4926]: I1122 10:40:14.088821 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-4sppr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f66b576d-83a6-4919-a918-7b075f35881e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a4a164ad2484a9c0b09c8e6b174448dedfe84a017ada69b19429d90621540080\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jwtrf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:01Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-4sppr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify 
certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:14Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:14 crc kubenswrapper[4926]: I1122 10:40:14.104295 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"19e2ae79-41cf-4653-aa92-cc410145852b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5b4679473c2c11fecbd8496e1cbeb15b3a2e3fff5e5b5f127e0d1f1d7b3e5b70\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8066b411b165bb6f3aa9fac7c4f7d5687e58fba56ff2afd14c7a351ba3877483\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://96a716433f83f8041fc5f29383937a1b49d73701286739632a5c07a7e4a42077\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\
\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ff433d4efb9f77a47f79a39aa609b09a40a642f554e19b8fce0c84ad96a5e34\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:39:40Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:14Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:14 crc kubenswrapper[4926]: I1122 10:40:14.113560 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:14 crc kubenswrapper[4926]: I1122 10:40:14.113610 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:14 crc kubenswrapper[4926]: I1122 10:40:14.113622 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:14 crc kubenswrapper[4926]: I1122 10:40:14.113639 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:14 crc kubenswrapper[4926]: I1122 10:40:14.113651 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:14Z","lastTransitionTime":"2025-11-22T10:40:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:40:14 crc kubenswrapper[4926]: I1122 10:40:14.118075 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-wqf9b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"385427b5-fb45-42dd-8ff8-a4c7cdad6157\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://06207f52a9095cf1830a6ad91952c7efa774aa15a2fca873dc09b6b995e26401\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4rrk8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:00Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-wqf9b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:14Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:14 crc kubenswrapper[4926]: I1122 10:40:14.136877 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:14Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:14 crc kubenswrapper[4926]: I1122 10:40:14.149165 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d4977b14-85c3-4141-9b15-1768f09e8d27\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7f4461b3d09c647d71476b2c089ab47e93fe0e0efb27f8179e476ff3667447f6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5nwn7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fb54559825bae40317f4dbc23c5b4238c4681f9845053656d59b2d2b3e504e1a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5nwn7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:00Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-xr9nd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:14Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:14 crc kubenswrapper[4926]: I1122 10:40:14.163062 4926 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-additional-cni-plugins-sr572" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fa95257d-7464-4038-b2f3-aa795e4ac425\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b2dd6b2a2fe640f28f973df40cf7a31e3cba962a0d2aec796c6fcfa3892adce1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0a7d49a66e96b6f535cae7cd657acf64164c67b52df055b133183ef25644863d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0a7d49a66e96b6f535cae7cd657acf64164c67b52df055b133183ef25644863d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://53833aaa04a836cea1e6539fd089b91d92bec1e9c1037d036a09fa83a95b45da\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2c
c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://53833aaa04a836cea1e6539fd089b91d92bec1e9c1037d036a09fa83a95b45da\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://80eeb7fb332eda9b9472092de548160297c4abdf76a929904fcf3321e992c230\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://80eeb7fb332eda9b9472092de548160297c4abdf76a929904fcf3321e992c230\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:40:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6293e4f193241ddbe2b4e93ceee670330e9e6cf86889fc9b049a7195da496a28\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6293e4f193241ddbe2b4e93ceee670330e9e6cf86889fc9b049a7195da496a28\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:40:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-re
lease\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6ffd06bbdeee7c23dde48a03a975b5a86b258d8f95281abd78c8ce459155bb3f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6ffd06bbdeee7c23dde48a03a975b5a86b258d8f95281abd78c8ce459155bb3f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:40:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ff5dbca8ebeaee6449c5eadf548e32151cc59875e405a574c1f2e0677b40bb19\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ff5dbca8ebeaee6449c5eadf548e32151cc59875e405a574c1f2e0677b40bb19\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:40:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:00Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-sr572\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:14Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:14 crc kubenswrapper[4926]: I1122 10:40:14.177052 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8bc26a84-5939-4708-97f7-36fb5b1d1f27\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:40Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:40Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bdc617bc3d316106b3e66df8521ae9165a90df081b92c4a6c167895006a667a4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8bdb9d01c359b36d5ea1517197ab1db0746267b371b5aaf6ae9af7157af6f76\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f92968d6a164ebcee515ead5d8704ab04430b65f29cec21c10cecf2b99079737\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"m
ountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e4845e8b99f2def354e4a055d407495a578db4994aef6fb1c70b91760bbf4e2a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e4845e8b99f2def354e4a055d407495a578db4994aef6fb1c70b91760bbf4e2a\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"message\\\":\\\"le observer\\\\nW1122 10:40:01.221686 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1122 10:40:01.221835 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1122 10:40:01.223195 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3742639324/tls.crt::/tmp/serving-cert-3742639324/tls.key\\\\\\\"\\\\nI1122 10:40:01.845350 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1122 10:40:01.848213 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1122 10:40:01.848231 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1122 10:40:01.848251 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1122 10:40:01.848258 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1122 10:40:01.855383 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1122 10:40:01.855412 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1122 10:40:01.855418 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1122 10:40:01.855423 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nI1122 10:40:01.855441 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1122 10:40:01.855456 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1122 10:40:01.855461 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1122 10:40:01.855464 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1122 10:40:01.857125 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T10:39:55Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://59001d5969924d417da69f7b385e8ee904bb5289914c6f4a58156a80f2553abd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:43Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e4fdad471fe7d7332fa8c1fc1e4779b934b140f9c808e0af44f05808c56b3e42\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e4fdad471fe7d7332fa8c1fc1e4779b934b140f9c808e0af44f05808c56b3e42\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:39:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:39:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:39:40Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:14Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:14 crc kubenswrapper[4926]: I1122 10:40:14.191086 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://20ee112074738980326d231b1680aa74c920ec797dbfe4752ebfdfb99695caeb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0a5e6c7d3f4dd4bbfdc58c50672c5187d32b136fb9e096c95e786193e1c773b9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:14Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:14 crc kubenswrapper[4926]: I1122 10:40:14.214610 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-z69nr" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"25bc94bb-a5d1-431c-9847-2f6a02997e25\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://20393151c13ddc03e23c32092c70e09c99527d0dd7554c02fc9d2ef8f6e38ba4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://877601ea3a274df873ed3165ae9f9e407093527a55ca10bb9a1c6092312c1b58\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ecaafcef7f92f85be04341712725da108f6dc00c79d4fb72f683186ccaa7c7d7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f2e1c170bdf63c83df5049fbe46ef4de6e08838e961ed3c16904cb9572fdf52f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4dd9889bb8600e958757f70ba92e2e7742f9f4e8ce9bf9c11dc4a2fd4ba0aaa2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f647a7091a4d0acbf116e2583851b13ec4415004447202e892f2b24b7acf5e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c31c877de789dac072b3eff81eb5639254198dc2af1cc12ecc408c3272f8c2c5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c31c877de789dac072b3eff81eb5639254198dc2af1cc12ecc408c3272f8c2c5\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-22T10:40:11Z\\\",\\\"message\\\":\\\"to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:11Z is after 2025-08-24T17:21:41Z]\\\\nI1122 10:40:11.880425 6326 services_controller.go:473] Services do not match for network=default, existing lbs: []services.LB{services.LB{Name:\\\\\\\"Service_openshift-machine-api/machine-api-operator-webhook_TCP_cluster\\\\\\\", UUID:\\\\\\\"e4e4203e-87c7-4024-930a-5d6bdfe2bdde\\\\\\\", Protocol:\\\\\\\"tcp\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-machine-api/machine-api-operator-webhook\\\\\\\"}, Opts:services.LBOpts{Reject:false, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{}, Templates:services.TemplateMap{}, Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}, built lbs: []services.LB{services.LB{Name:\\\\\\\"Service_openshift-machine-api/machine-api-operator-webhook_TCP_cluster\\\\\\\", UUID:\\\\\\\"\\\\\\\", Protocol:\\\\\\\"TCP\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-machine-api/machine-api-operator-webhook\\\\\\\"}, Opts:services.LBOpts{Reject:true, EmptyLBEvents:false, AffinityTi\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:10Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=ovnkube-controller 
pod=ovnkube-node-z69nr_openshift-ovn-kubernetes(25bc94bb-a5d1-431c-9847-2f6a02997e25)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://19bceeb84e2cce918b75507fb0f9d5e4365fcd0d30da4491b83ed54b3c2d369a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fde011e559f073f261a07a3e55c05d88d1b11a16b6858b74cb8ef79b97c266b1\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fde011e559f073f261a07a3e55c05d88d1b11a16b6858b74cb8ef79b97c266b1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:00Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-z69nr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:14Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:14 crc kubenswrapper[4926]: I1122 10:40:14.216142 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:14 crc kubenswrapper[4926]: I1122 10:40:14.216176 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:14 crc kubenswrapper[4926]: I1122 10:40:14.216189 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:14 crc kubenswrapper[4926]: I1122 10:40:14.216204 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:14 crc kubenswrapper[4926]: I1122 10:40:14.216216 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:14Z","lastTransitionTime":"2025-11-22T10:40:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:40:14 crc kubenswrapper[4926]: I1122 10:40:14.228231 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-cqsd2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"40323bfa-0937-4581-a5f5-bbfe1de066eb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ed82455ff9e2721a5688857be775b66c293c17713ae1ce362da8eca9a0c52c24\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w5t6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2335c0518813a704a36b683366f4b63c953afc0187416234b6682532298f80ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w5t6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:12Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-cqsd2\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:14Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:14 crc kubenswrapper[4926]: I1122 10:40:14.259573 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1f3bdd4c-e5c5-46f1-8f92-450d892ef2e2\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b68f6c08d1ac65e51054d798dcb1e6affbf5f114ea3848b544d3093168e44150\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://adceb29cfdba4f70773eb38f81b2291080a8a25e6d6ffe7fbbb037118e0ac29f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://29acb771444281287208f7c5aecfe78ed6e70fe2940be31d423c011878bee6c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"la
stState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c37c77ac89f0469ff68b3d5dffc2af7d7b6997b8975abe244e302baf0f5bbacc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ed65ad0463b5aa72d13756891af20691bff705a94c67a55d45f71af2a88b01e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2f4c96d7281eda14ad3971ee27f9f0f401d12ba8b72e32b65bd9d4935182f980\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2f4c96d7281eda14ad3971ee27f9f0f401d12ba8b72e32b65bd9d4935182f980\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:39:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:39:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ebad6d17a869f5f58a7c43300ceace96025b58ca9621cb6bc23b45d69fe471b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",
\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ebad6d17a869f5f58a7c43300ceace96025b58ca9621cb6bc23b45d69fe471b4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:39:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:39:42Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://6075bb148352742b787b1e8df58fe76d06db89303a7c297d07446148532e92cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6075bb148352742b787b1e8df58fe76d06db89303a7c297d07446148532e92cb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:39:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:39:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:39:40Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:14Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:14 crc kubenswrapper[4926]: I1122 10:40:14.273482 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://136423f9eeb872be0b685dcfee2a81e5f4133ad09607dc53fdfa64d70e87ceab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:14Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:14 crc kubenswrapper[4926]: I1122 10:40:14.285864 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-c6w2q" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"36de2843-6491-4c54-b624-c4a3d328c164\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://13dff3bd18b5adc45fb92f8207a6726011752f430271dabbae9f2e94011f1b08\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-52vzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:00Z\\\"}}\" for pod \"openshift-multus\"/\"multus-c6w2q\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:14Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:14 crc kubenswrapper[4926]: I1122 10:40:14.306911 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2bff8ee1644ff7cd9bee279e5c641b5d9c55e0546fbc7d1f2330e4e86aee0c37\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:14Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:14 crc kubenswrapper[4926]: I1122 10:40:14.318991 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:14 crc kubenswrapper[4926]: I1122 10:40:14.319048 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:14 crc kubenswrapper[4926]: I1122 10:40:14.319068 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:14 crc kubenswrapper[4926]: I1122 10:40:14.319093 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:14 crc kubenswrapper[4926]: I1122 10:40:14.319109 4926 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:14Z","lastTransitionTime":"2025-11-22T10:40:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:14 crc kubenswrapper[4926]: I1122 10:40:14.325527 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:14Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:14 crc kubenswrapper[4926]: I1122 10:40:14.339174 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:14Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:14 crc kubenswrapper[4926]: I1122 10:40:14.355244 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-4sppr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f66b576d-83a6-4919-a918-7b075f35881e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a4a164ad2484a9c0b09c8e6b174448dedfe84a017ada69b19429d90621540080\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jwtrf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:01Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-4sppr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify 
certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:14Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:14 crc kubenswrapper[4926]: I1122 10:40:14.373682 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"19e2ae79-41cf-4653-aa92-cc410145852b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5b4679473c2c11fecbd8496e1cbeb15b3a2e3fff5e5b5f127e0d1f1d7b3e5b70\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8066b411b165bb6f3aa9fac7c4f7d5687e58fba56ff2afd14c7a351ba3877483\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://96a716433f83f8041fc5f29383937a1b49d73701286739632a5c07a7e4a42077\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\
\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ff433d4efb9f77a47f79a39aa609b09a40a642f554e19b8fce0c84ad96a5e34\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:39:40Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:14Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:14 crc kubenswrapper[4926]: I1122 10:40:14.388344 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-wqf9b" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"385427b5-fb45-42dd-8ff8-a4c7cdad6157\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://06207f52a9095cf1830a6ad91952c7efa774aa15a2fca873dc09b6b995e26401\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4rrk8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:00Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-wqf9b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:14Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:14 crc kubenswrapper[4926]: I1122 10:40:14.421606 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:14 crc kubenswrapper[4926]: I1122 10:40:14.421650 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:14 crc kubenswrapper[4926]: I1122 10:40:14.421661 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:14 crc kubenswrapper[4926]: I1122 10:40:14.421678 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:14 crc kubenswrapper[4926]: I1122 10:40:14.421689 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:14Z","lastTransitionTime":"2025-11-22T10:40:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: 
no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:14 crc kubenswrapper[4926]: I1122 10:40:14.474146 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/network-metrics-daemon-jfbf4"] Nov 22 10:40:14 crc kubenswrapper[4926]: I1122 10:40:14.474815 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-jfbf4" Nov 22 10:40:14 crc kubenswrapper[4926]: E1122 10:40:14.474962 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-jfbf4" podUID="c42b6f47-b1a4-4fee-8681-3b5288370323" Nov 22 10:40:14 crc kubenswrapper[4926]: I1122 10:40:14.492648 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-cqsd2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"40323bfa-0937-4581-a5f5-bbfe1de066eb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ed82455ff9e2721a5688857be775b66c293c17713ae1ce362da8eca9a0c52c24\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w5t6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2335c0518813a704a36b683366f4b63c953afc0187416234b6682532298f80ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\
\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w5t6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:12Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-cqsd2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:14Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:14 crc kubenswrapper[4926]: I1122 10:40:14.499057 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/c42b6f47-b1a4-4fee-8681-3b5288370323-metrics-certs\") pod \"network-metrics-daemon-jfbf4\" (UID: \"c42b6f47-b1a4-4fee-8681-3b5288370323\") " pod="openshift-multus/network-metrics-daemon-jfbf4" Nov 22 10:40:14 crc kubenswrapper[4926]: I1122 10:40:14.499147 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cddxh\" (UniqueName: \"kubernetes.io/projected/c42b6f47-b1a4-4fee-8681-3b5288370323-kube-api-access-cddxh\") pod \"network-metrics-daemon-jfbf4\" (UID: \"c42b6f47-b1a4-4fee-8681-3b5288370323\") " pod="openshift-multus/network-metrics-daemon-jfbf4" Nov 22 10:40:14 crc kubenswrapper[4926]: I1122 10:40:14.524837 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:14 crc kubenswrapper[4926]: I1122 10:40:14.524910 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:14 crc kubenswrapper[4926]: I1122 10:40:14.524931 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:14 crc kubenswrapper[4926]: I1122 10:40:14.524959 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:14 crc kubenswrapper[4926]: I1122 10:40:14.524976 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:14Z","lastTransitionTime":"2025-11-22T10:40:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:40:14 crc kubenswrapper[4926]: I1122 10:40:14.527817 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1f3bdd4c-e5c5-46f1-8f92-450d892ef2e2\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b68f6c08d1ac65e51054d798dcb1e6affbf5f114ea3848b544d3093168e44150\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://adceb29cfdba4f70773eb38f81b2291080a8a25e6d6ffe7fbbb037118e0ac29f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://29acb771444281287208f7c5aecfe78ed6e70fe2940be31d423c011878bee6c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\
":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c37c77ac89f0469ff68b3d5dffc2af7d7b6997b8975abe244e302baf0f5bbacc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ed65ad0463b5aa72d13756891af20691bff705a94c67a55d45f71af2a88b01e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2f4c96d7281eda14ad3971ee27f9f0f401d12ba8b72e32b65bd9d4935182f980\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2f4c96d7281eda14ad3971ee27f9f0f401d12ba8b72e32b65bd9d4935182f980\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:39:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:39:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ebad6d17a869f5f58a7c43300ceace96025b58ca9621cb6bc23b45d69fe471b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ebad6d17a869f5f58a7c43300ceace96025b58ca9621cb6bc23b45d69fe471b4\\\",\\\"exitCode\\\":0,\\\"finished
At\\\":\\\"2025-11-22T10:39:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:39:42Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://6075bb148352742b787b1e8df58fe76d06db89303a7c297d07446148532e92cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6075bb148352742b787b1e8df58fe76d06db89303a7c297d07446148532e92cb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:39:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:39:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:39:40Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:14Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:14 crc kubenswrapper[4926]: I1122 10:40:14.547605 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://20ee112074738980326d231b1680aa74c920ec797dbfe4752ebfdfb99695caeb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0a5e6c7d3f4dd4bbfdc58c50672c5187d32b136fb9e096c95e786193e1c773b9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:14Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:14 crc kubenswrapper[4926]: I1122 10:40:14.576481 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-z69nr" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"25bc94bb-a5d1-431c-9847-2f6a02997e25\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://20393151c13ddc03e23c32092c70e09c99527d0dd7554c02fc9d2ef8f6e38ba4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://877601ea3a274df873ed3165ae9f9e407093527a55ca10bb9a1c6092312c1b58\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ecaafcef7f92f85be04341712725da108f6dc00c79d4fb72f683186ccaa7c7d7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f2e1c170bdf63c83df5049fbe46ef4de6e08838e961ed3c16904cb9572fdf52f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4dd9889bb8600e958757f70ba92e2e7742f9f4e8ce9bf9c11dc4a2fd4ba0aaa2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f647a7091a4d0acbf116e2583851b13ec4415004447202e892f2b24b7acf5e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c31c877de789dac072b3eff81eb5639254198dc2af1cc12ecc408c3272f8c2c5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c31c877de789dac072b3eff81eb5639254198dc2af1cc12ecc408c3272f8c2c5\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-22T10:40:11Z\\\",\\\"message\\\":\\\"to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:11Z is after 2025-08-24T17:21:41Z]\\\\nI1122 10:40:11.880425 6326 services_controller.go:473] Services do not match for network=default, existing lbs: []services.LB{services.LB{Name:\\\\\\\"Service_openshift-machine-api/machine-api-operator-webhook_TCP_cluster\\\\\\\", UUID:\\\\\\\"e4e4203e-87c7-4024-930a-5d6bdfe2bdde\\\\\\\", Protocol:\\\\\\\"tcp\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-machine-api/machine-api-operator-webhook\\\\\\\"}, Opts:services.LBOpts{Reject:false, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{}, Templates:services.TemplateMap{}, Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}, built lbs: []services.LB{services.LB{Name:\\\\\\\"Service_openshift-machine-api/machine-api-operator-webhook_TCP_cluster\\\\\\\", UUID:\\\\\\\"\\\\\\\", Protocol:\\\\\\\"TCP\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-machine-api/machine-api-operator-webhook\\\\\\\"}, Opts:services.LBOpts{Reject:true, EmptyLBEvents:false, AffinityTi\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:10Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=ovnkube-controller 
pod=ovnkube-node-z69nr_openshift-ovn-kubernetes(25bc94bb-a5d1-431c-9847-2f6a02997e25)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://19bceeb84e2cce918b75507fb0f9d5e4365fcd0d30da4491b83ed54b3c2d369a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fde011e559f073f261a07a3e55c05d88d1b11a16b6858b74cb8ef79b97c266b1\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fde011e559f073f261a07a3e55c05d88d1b11a16b6858b74cb8ef79b97c266b1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:00Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-z69nr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:14Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:14 crc kubenswrapper[4926]: I1122 10:40:14.580866 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 22 10:40:14 crc kubenswrapper[4926]: E1122 10:40:14.581098 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 22 10:40:14 crc kubenswrapper[4926]: I1122 10:40:14.580866 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 10:40:14 crc kubenswrapper[4926]: E1122 10:40:14.581508 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 22 10:40:14 crc kubenswrapper[4926]: I1122 10:40:14.595096 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://136423f9eeb872be0b685dcfee2a81e5f4133ad09607dc53fdfa64d70e87ceab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:14Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:14 crc kubenswrapper[4926]: I1122 10:40:14.600520 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/c42b6f47-b1a4-4fee-8681-3b5288370323-metrics-certs\") pod \"network-metrics-daemon-jfbf4\" (UID: \"c42b6f47-b1a4-4fee-8681-3b5288370323\") " pod="openshift-multus/network-metrics-daemon-jfbf4" Nov 22 10:40:14 crc kubenswrapper[4926]: I1122 10:40:14.600799 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cddxh\" (UniqueName: \"kubernetes.io/projected/c42b6f47-b1a4-4fee-8681-3b5288370323-kube-api-access-cddxh\") pod \"network-metrics-daemon-jfbf4\" (UID: \"c42b6f47-b1a4-4fee-8681-3b5288370323\") " pod="openshift-multus/network-metrics-daemon-jfbf4" Nov 22 10:40:14 crc kubenswrapper[4926]: E1122 10:40:14.600743 4926 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 22 10:40:14 crc kubenswrapper[4926]: E1122 10:40:14.601212 4926 nestedpendingoperations.go:348] Operation for 
"{volumeName:kubernetes.io/secret/c42b6f47-b1a4-4fee-8681-3b5288370323-metrics-certs podName:c42b6f47-b1a4-4fee-8681-3b5288370323 nodeName:}" failed. No retries permitted until 2025-11-22 10:40:15.101190549 +0000 UTC m=+35.402795846 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/c42b6f47-b1a4-4fee-8681-3b5288370323-metrics-certs") pod "network-metrics-daemon-jfbf4" (UID: "c42b6f47-b1a4-4fee-8681-3b5288370323") : object "openshift-multus"/"metrics-daemon-secret" not registered Nov 22 10:40:14 crc kubenswrapper[4926]: I1122 10:40:14.610710 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-c6w2q" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"36de2843-6491-4c54-b624-c4a3d328c164\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://13dff3bd18b5adc45fb92f8207a6726011752f430271dabbae9f2e94011f1b08\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Dis
abled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-52vzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:00Z\\\"}}\" for pod \"openshift-multus\"/\"multus-c6w2q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:14Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:14 crc kubenswrapper[4926]: I1122 10:40:14.623057 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:14Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:14 crc kubenswrapper[4926]: I1122 10:40:14.626953 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:14 crc kubenswrapper[4926]: I1122 10:40:14.627001 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:14 crc kubenswrapper[4926]: I1122 10:40:14.627012 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:14 crc kubenswrapper[4926]: I1122 10:40:14.627024 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:14 crc kubenswrapper[4926]: I1122 10:40:14.627034 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:14Z","lastTransitionTime":"2025-11-22T10:40:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:40:14 crc kubenswrapper[4926]: I1122 10:40:14.628203 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cddxh\" (UniqueName: \"kubernetes.io/projected/c42b6f47-b1a4-4fee-8681-3b5288370323-kube-api-access-cddxh\") pod \"network-metrics-daemon-jfbf4\" (UID: \"c42b6f47-b1a4-4fee-8681-3b5288370323\") " pod="openshift-multus/network-metrics-daemon-jfbf4" Nov 22 10:40:14 crc kubenswrapper[4926]: I1122 10:40:14.636627 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-4sppr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f66b576d-83a6-4919-a918-7b075f35881e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a4a164ad2484a9c0b09c8e6b174448dedfe84a017ada69b19429d90621540080\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jwtrf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:01Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-4sppr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:14Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:14 crc kubenswrapper[4926]: I1122 10:40:14.648830 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"19e2ae79-41cf-4653-aa92-cc410145852b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5b4679473c2c11fecbd8496e1cbeb15b3a2e3fff5e5b5f127e0d1f1d7b3e5b70\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8066b411b165bb6f3aa9fac7c4f7d5687e58fba56ff2afd14c7a351ba3877483\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://96a716433f83f8041fc5f29383937a1b49d73701286739632a5c07a7e4a42077\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ff433d4efb9f77a47f79a39aa609b09a40a642f554e19b8fce0c84ad96a5e34\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:39:40Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:14Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:14 crc kubenswrapper[4926]: I1122 10:40:14.658802 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-wqf9b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"385427b5-fb45-42dd-8ff8-a4c7cdad6157\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://06207f52a9095cf1830a6ad91952c7efa774aa15a2fca873dc09b6b995e26401\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4rrk8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\"
:[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:00Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-wqf9b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:14Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:14 crc kubenswrapper[4926]: I1122 10:40:14.670667 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2bff8ee1644ff7cd9bee279e5c641b5d9c55e0546fbc7d1f2330e4e86aee0c37\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:14Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:14 crc kubenswrapper[4926]: I1122 10:40:14.683548 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:14Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:14 crc kubenswrapper[4926]: I1122 10:40:14.698712 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-sr572" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"fa95257d-7464-4038-b2f3-aa795e4ac425\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b2dd6b2a2fe640f28f973df40cf7a31e3cba962a0d2aec796c6fcfa3892adce1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0a7d49a66e96b6f535cae7cd657acf64164c67b52df055b133183ef25644863d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0a7d49a66e96b6f535cae7cd657acf64164c67b52df055b133183ef25644863d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://53833aaa04a836cea1e6539fd089b91d92bec1e9c1037d036a09fa83a95b45da\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://53833aaa04a836cea1e6539fd089b91d92bec1e9c1037d036a09fa83a95b45da\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://80eeb7fb332eda9b9472092de548160297c4abdf76a929904fcf3321e992c230\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://80eeb7fb332eda9b9472092de548160297c4abdf76a929904fcf3321e992c230\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:40:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6293e4f193241ddbe2b4e93ceee670330e9e6cf86889fc9b049a7195da496a28\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6293e4f193241ddbe2b4e93ceee670330e9e6cf86889fc9b049a7195da496a28\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:40:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6ffd06bbdeee7c23dde48a03a975b5a86b258d8f95281abd78c8ce459155bb3f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6ffd06bbdeee7c23dde48a03a975b5a86b258d8f95281abd78c8ce459155bb3f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:40:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ff5dbca8ebeaee6449c5eadf548e32151cc59875e405a574c1f2e0677b40bb19\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ff5dbca8ebeaee6449c5eadf548e32151cc59875e405a574c1f2e0677b40bb19\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:40:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:00Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-sr572\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:14Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:14 crc kubenswrapper[4926]: I1122 10:40:14.708806 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-jfbf4" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c42b6f47-b1a4-4fee-8681-3b5288370323\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:14Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:14Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:14Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cddxh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cddxh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:14Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-jfbf4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:14Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:14 crc kubenswrapper[4926]: I1122 10:40:14.722700 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8bc26a84-5939-4708-97f7-36fb5b1d1f27\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:40Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:40Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bdc617bc3d316106b3e66df8521ae9165a90df081b92c4a6c167895006a667a4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8bdb9d01c359b36d5ea1517197ab1db0746267b371b5aaf6ae9af7157af6f76\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f92968d6a164ebcee515ead5d8704ab04430b65f29cec21c10cecf2b99079737\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"m
ountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e4845e8b99f2def354e4a055d407495a578db4994aef6fb1c70b91760bbf4e2a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e4845e8b99f2def354e4a055d407495a578db4994aef6fb1c70b91760bbf4e2a\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"message\\\":\\\"le observer\\\\nW1122 10:40:01.221686 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1122 10:40:01.221835 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1122 10:40:01.223195 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3742639324/tls.crt::/tmp/serving-cert-3742639324/tls.key\\\\\\\"\\\\nI1122 10:40:01.845350 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1122 10:40:01.848213 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1122 10:40:01.848231 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1122 10:40:01.848251 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1122 10:40:01.848258 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1122 10:40:01.855383 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1122 10:40:01.855412 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1122 10:40:01.855418 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1122 10:40:01.855423 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nI1122 10:40:01.855441 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1122 10:40:01.855456 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1122 10:40:01.855461 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1122 10:40:01.855464 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1122 10:40:01.857125 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T10:39:55Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://59001d5969924d417da69f7b385e8ee904bb5289914c6f4a58156a80f2553abd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:43Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e4fdad471fe7d7332fa8c1fc1e4779b934b140f9c808e0af44f05808c56b3e42\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e4fdad471fe7d7332fa8c1fc1e4779b934b140f9c808e0af44f05808c56b3e42\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:39:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:39:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:39:40Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:14Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:14 crc kubenswrapper[4926]: I1122 10:40:14.729136 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:14 crc kubenswrapper[4926]: I1122 10:40:14.729181 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:14 crc kubenswrapper[4926]: I1122 10:40:14.729193 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:14 crc kubenswrapper[4926]: I1122 10:40:14.729209 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:14 crc kubenswrapper[4926]: I1122 10:40:14.729223 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:14Z","lastTransitionTime":"2025-11-22T10:40:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:14 crc kubenswrapper[4926]: I1122 10:40:14.734571 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:14Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:14 crc kubenswrapper[4926]: I1122 10:40:14.745258 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d4977b14-85c3-4141-9b15-1768f09e8d27\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7f4461b3d09c647d71476b2c089ab47e93fe0e0efb27f8179e476ff3667447f6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5nwn7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fb54559825bae40317f4dbc23c5b4238c4681f9845053656d59b2d2b3e504e1a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5nwn7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:00Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-xr9nd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:14Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:14 crc kubenswrapper[4926]: I1122 10:40:14.831598 4926 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:14 crc kubenswrapper[4926]: I1122 10:40:14.831641 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:14 crc kubenswrapper[4926]: I1122 10:40:14.831651 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:14 crc kubenswrapper[4926]: I1122 10:40:14.831668 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:14 crc kubenswrapper[4926]: I1122 10:40:14.831679 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:14Z","lastTransitionTime":"2025-11-22T10:40:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:14 crc kubenswrapper[4926]: I1122 10:40:14.934106 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:14 crc kubenswrapper[4926]: I1122 10:40:14.934138 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:14 crc kubenswrapper[4926]: I1122 10:40:14.934149 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:14 crc kubenswrapper[4926]: I1122 10:40:14.934164 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:14 crc kubenswrapper[4926]: I1122 10:40:14.934175 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:14Z","lastTransitionTime":"2025-11-22T10:40:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:15 crc kubenswrapper[4926]: I1122 10:40:15.036427 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:15 crc kubenswrapper[4926]: I1122 10:40:15.036491 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:15 crc kubenswrapper[4926]: I1122 10:40:15.036513 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:15 crc kubenswrapper[4926]: I1122 10:40:15.036544 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:15 crc kubenswrapper[4926]: I1122 10:40:15.036567 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:15Z","lastTransitionTime":"2025-11-22T10:40:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:40:15 crc kubenswrapper[4926]: I1122 10:40:15.107047 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/c42b6f47-b1a4-4fee-8681-3b5288370323-metrics-certs\") pod \"network-metrics-daemon-jfbf4\" (UID: \"c42b6f47-b1a4-4fee-8681-3b5288370323\") " pod="openshift-multus/network-metrics-daemon-jfbf4" Nov 22 10:40:15 crc kubenswrapper[4926]: E1122 10:40:15.107264 4926 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 22 10:40:15 crc kubenswrapper[4926]: E1122 10:40:15.107374 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/c42b6f47-b1a4-4fee-8681-3b5288370323-metrics-certs podName:c42b6f47-b1a4-4fee-8681-3b5288370323 nodeName:}" failed. No retries permitted until 2025-11-22 10:40:16.107342943 +0000 UTC m=+36.408948270 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/c42b6f47-b1a4-4fee-8681-3b5288370323-metrics-certs") pod "network-metrics-daemon-jfbf4" (UID: "c42b6f47-b1a4-4fee-8681-3b5288370323") : object "openshift-multus"/"metrics-daemon-secret" not registered Nov 22 10:40:15 crc kubenswrapper[4926]: I1122 10:40:15.140378 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:15 crc kubenswrapper[4926]: I1122 10:40:15.140453 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:15 crc kubenswrapper[4926]: I1122 10:40:15.140476 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:15 crc kubenswrapper[4926]: I1122 10:40:15.140505 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:15 crc kubenswrapper[4926]: I1122 10:40:15.140527 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:15Z","lastTransitionTime":"2025-11-22T10:40:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:40:15 crc kubenswrapper[4926]: I1122 10:40:15.243795 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:15 crc kubenswrapper[4926]: I1122 10:40:15.243846 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:15 crc kubenswrapper[4926]: I1122 10:40:15.243862 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:15 crc kubenswrapper[4926]: I1122 10:40:15.243885 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:15 crc kubenswrapper[4926]: I1122 10:40:15.243933 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:15Z","lastTransitionTime":"2025-11-22T10:40:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:15 crc kubenswrapper[4926]: I1122 10:40:15.347337 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:15 crc kubenswrapper[4926]: I1122 10:40:15.347412 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:15 crc kubenswrapper[4926]: I1122 10:40:15.347434 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:15 crc kubenswrapper[4926]: I1122 10:40:15.347464 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:15 crc kubenswrapper[4926]: I1122 10:40:15.347483 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:15Z","lastTransitionTime":"2025-11-22T10:40:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:15 crc kubenswrapper[4926]: I1122 10:40:15.451506 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:15 crc kubenswrapper[4926]: I1122 10:40:15.451564 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:15 crc kubenswrapper[4926]: I1122 10:40:15.451580 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:15 crc kubenswrapper[4926]: I1122 10:40:15.451602 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:15 crc kubenswrapper[4926]: I1122 10:40:15.451617 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:15Z","lastTransitionTime":"2025-11-22T10:40:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:40:15 crc kubenswrapper[4926]: I1122 10:40:15.554667 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:15 crc kubenswrapper[4926]: I1122 10:40:15.554728 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:15 crc kubenswrapper[4926]: I1122 10:40:15.554745 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:15 crc kubenswrapper[4926]: I1122 10:40:15.554770 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:15 crc kubenswrapper[4926]: I1122 10:40:15.554787 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:15Z","lastTransitionTime":"2025-11-22T10:40:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:15 crc kubenswrapper[4926]: I1122 10:40:15.581496 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 22 10:40:15 crc kubenswrapper[4926]: E1122 10:40:15.581727 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 22 10:40:15 crc kubenswrapper[4926]: I1122 10:40:15.657272 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:15 crc kubenswrapper[4926]: I1122 10:40:15.657318 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:15 crc kubenswrapper[4926]: I1122 10:40:15.657341 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:15 crc kubenswrapper[4926]: I1122 10:40:15.657365 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:15 crc kubenswrapper[4926]: I1122 10:40:15.657381 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:15Z","lastTransitionTime":"2025-11-22T10:40:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Nov 22 10:40:15 crc kubenswrapper[4926]: I1122 10:40:15.760695 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 10:40:15 crc kubenswrapper[4926]: I1122 10:40:15.760757 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 10:40:15 crc kubenswrapper[4926]: I1122 10:40:15.760782 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 10:40:15 crc kubenswrapper[4926]: I1122 10:40:15.760811 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 10:40:15 crc kubenswrapper[4926]: I1122 10:40:15.760834 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:15Z","lastTransitionTime":"2025-11-22T10:40:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 10:40:15 crc kubenswrapper[4926]: I1122 10:40:15.863718 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 10:40:15 crc kubenswrapper[4926]: I1122 10:40:15.863798 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 10:40:15 crc kubenswrapper[4926]: I1122 10:40:15.863822 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 10:40:15 crc kubenswrapper[4926]: I1122 10:40:15.863851 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 10:40:15 crc kubenswrapper[4926]: I1122 10:40:15.863873 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:15Z","lastTransitionTime":"2025-11-22T10:40:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 10:40:15 crc kubenswrapper[4926]: I1122 10:40:15.967235 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 10:40:15 crc kubenswrapper[4926]: I1122 10:40:15.967306 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 10:40:15 crc kubenswrapper[4926]: I1122 10:40:15.967323 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 10:40:15 crc kubenswrapper[4926]: I1122 10:40:15.967348 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 10:40:15 crc kubenswrapper[4926]: I1122 10:40:15.967365 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:15Z","lastTransitionTime":"2025-11-22T10:40:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 10:40:16 crc kubenswrapper[4926]: I1122 10:40:16.069576 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 10:40:16 crc kubenswrapper[4926]: I1122 10:40:16.069641 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 10:40:16 crc kubenswrapper[4926]: I1122 10:40:16.069666 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 10:40:16 crc kubenswrapper[4926]: I1122 10:40:16.069696 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 10:40:16 crc kubenswrapper[4926]: I1122 10:40:16.069720 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:16Z","lastTransitionTime":"2025-11-22T10:40:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 10:40:16 crc kubenswrapper[4926]: I1122 10:40:16.118328 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/c42b6f47-b1a4-4fee-8681-3b5288370323-metrics-certs\") pod \"network-metrics-daemon-jfbf4\" (UID: \"c42b6f47-b1a4-4fee-8681-3b5288370323\") " pod="openshift-multus/network-metrics-daemon-jfbf4"
Nov 22 10:40:16 crc kubenswrapper[4926]: E1122 10:40:16.118573 4926 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered
Nov 22 10:40:16 crc kubenswrapper[4926]: E1122 10:40:16.118675 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/c42b6f47-b1a4-4fee-8681-3b5288370323-metrics-certs podName:c42b6f47-b1a4-4fee-8681-3b5288370323 nodeName:}" failed. No retries permitted until 2025-11-22 10:40:18.118648117 +0000 UTC m=+38.420253594 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/c42b6f47-b1a4-4fee-8681-3b5288370323-metrics-certs") pod "network-metrics-daemon-jfbf4" (UID: "c42b6f47-b1a4-4fee-8681-3b5288370323") : object "openshift-multus"/"metrics-daemon-secret" not registered
Nov 22 10:40:16 crc kubenswrapper[4926]: I1122 10:40:16.172522 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 10:40:16 crc kubenswrapper[4926]: I1122 10:40:16.172573 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 10:40:16 crc kubenswrapper[4926]: I1122 10:40:16.172589 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 10:40:16 crc kubenswrapper[4926]: I1122 10:40:16.172612 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 10:40:16 crc kubenswrapper[4926]: I1122 10:40:16.172628 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:16Z","lastTransitionTime":"2025-11-22T10:40:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 10:40:16 crc kubenswrapper[4926]: I1122 10:40:16.219831 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 22 10:40:16 crc kubenswrapper[4926]: I1122 10:40:16.220057 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 22 10:40:16 crc kubenswrapper[4926]: E1122 10:40:16.220090 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 10:40:32.220061241 +0000 UTC m=+52.521666528 (durationBeforeRetry 16s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 22 10:40:16 crc kubenswrapper[4926]: E1122 10:40:16.220178 4926 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered
Nov 22 10:40:16 crc kubenswrapper[4926]: E1122 10:40:16.220321 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-22 10:40:32.220312837 +0000 UTC m=+52.521918124 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered
Nov 22 10:40:16 crc kubenswrapper[4926]: I1122 10:40:16.220639 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 22 10:40:16 crc kubenswrapper[4926]: E1122 10:40:16.220740 4926 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered
Nov 22 10:40:16 crc kubenswrapper[4926]: E1122 10:40:16.220765 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-22 10:40:32.220758539 +0000 UTC m=+52.522363826 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered
Nov 22 10:40:16 crc kubenswrapper[4926]: I1122 10:40:16.274983 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 10:40:16 crc kubenswrapper[4926]: I1122 10:40:16.275042 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 10:40:16 crc kubenswrapper[4926]: I1122 10:40:16.275051 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 10:40:16 crc kubenswrapper[4926]: I1122 10:40:16.275064 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 10:40:16 crc kubenswrapper[4926]: I1122 10:40:16.275097 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:16Z","lastTransitionTime":"2025-11-22T10:40:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 10:40:16 crc kubenswrapper[4926]: I1122 10:40:16.322347 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 22 10:40:16 crc kubenswrapper[4926]: I1122 10:40:16.322462 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 22 10:40:16 crc kubenswrapper[4926]: E1122 10:40:16.322624 4926 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered
Nov 22 10:40:16 crc kubenswrapper[4926]: E1122 10:40:16.322673 4926 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered
Nov 22 10:40:16 crc kubenswrapper[4926]: E1122 10:40:16.322697 4926 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Nov 22 10:40:16 crc kubenswrapper[4926]: E1122 10:40:16.322638 4926 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered
Nov 22 10:40:16 crc kubenswrapper[4926]: E1122
10:40:16.322770 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-22 10:40:32.322746367 +0000 UTC m=+52.624351684 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Nov 22 10:40:16 crc kubenswrapper[4926]: E1122 10:40:16.322789 4926 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered
Nov 22 10:40:16 crc kubenswrapper[4926]: E1122 10:40:16.322809 4926 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Nov 22 10:40:16 crc kubenswrapper[4926]: E1122 10:40:16.322878 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-22 10:40:32.32285715 +0000 UTC m=+52.624462477 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Nov 22 10:40:16 crc kubenswrapper[4926]: I1122 10:40:16.377467 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 10:40:16 crc kubenswrapper[4926]: I1122 10:40:16.377539 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 10:40:16 crc kubenswrapper[4926]: I1122 10:40:16.377562 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 10:40:16 crc kubenswrapper[4926]: I1122 10:40:16.377590 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 10:40:16 crc kubenswrapper[4926]: I1122 10:40:16.377610 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:16Z","lastTransitionTime":"2025-11-22T10:40:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 10:40:16 crc kubenswrapper[4926]: I1122 10:40:16.480995 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 10:40:16 crc kubenswrapper[4926]: I1122 10:40:16.481049 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 10:40:16 crc kubenswrapper[4926]: I1122 10:40:16.481072 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 10:40:16 crc kubenswrapper[4926]: I1122 10:40:16.481100 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 10:40:16 crc kubenswrapper[4926]: I1122 10:40:16.481121 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:16Z","lastTransitionTime":"2025-11-22T10:40:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 10:40:16 crc kubenswrapper[4926]: I1122 10:40:16.580936 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-jfbf4"
Nov 22 10:40:16 crc kubenswrapper[4926]: I1122 10:40:16.581014 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 22 10:40:16 crc kubenswrapper[4926]: I1122 10:40:16.581089 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 22 10:40:16 crc kubenswrapper[4926]: E1122 10:40:16.581282 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-jfbf4" podUID="c42b6f47-b1a4-4fee-8681-3b5288370323"
Nov 22 10:40:16 crc kubenswrapper[4926]: E1122 10:40:16.581378 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 22 10:40:16 crc kubenswrapper[4926]: E1122 10:40:16.581480 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 22 10:40:16 crc kubenswrapper[4926]: I1122 10:40:16.583660 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 10:40:16 crc kubenswrapper[4926]: I1122 10:40:16.583699 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 10:40:16 crc kubenswrapper[4926]: I1122 10:40:16.583716 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 10:40:16 crc kubenswrapper[4926]: I1122 10:40:16.583738 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 10:40:16 crc kubenswrapper[4926]: I1122 10:40:16.583754 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:16Z","lastTransitionTime":"2025-11-22T10:40:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 10:40:16 crc kubenswrapper[4926]: I1122 10:40:16.686807 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 10:40:16 crc kubenswrapper[4926]: I1122 10:40:16.686863 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 10:40:16 crc kubenswrapper[4926]: I1122 10:40:16.686880 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 10:40:16 crc kubenswrapper[4926]: I1122 10:40:16.686937 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 10:40:16 crc kubenswrapper[4926]: I1122 10:40:16.686956 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:16Z","lastTransitionTime":"2025-11-22T10:40:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 10:40:16 crc kubenswrapper[4926]: I1122 10:40:16.790716 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 10:40:16 crc kubenswrapper[4926]: I1122 10:40:16.790764 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 10:40:16 crc kubenswrapper[4926]: I1122 10:40:16.790781 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 10:40:16 crc kubenswrapper[4926]: I1122 10:40:16.790801 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 10:40:16 crc kubenswrapper[4926]: I1122 10:40:16.790819 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:16Z","lastTransitionTime":"2025-11-22T10:40:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 10:40:16 crc kubenswrapper[4926]: I1122 10:40:16.894475 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 10:40:16 crc kubenswrapper[4926]: I1122 10:40:16.894550 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 10:40:16 crc kubenswrapper[4926]: I1122 10:40:16.894571 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 10:40:16 crc kubenswrapper[4926]: I1122 10:40:16.894602 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 10:40:16 crc kubenswrapper[4926]: I1122 10:40:16.894624 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:16Z","lastTransitionTime":"2025-11-22T10:40:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 10:40:17 crc kubenswrapper[4926]: I1122 10:40:17.004308 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 10:40:17 crc kubenswrapper[4926]: I1122 10:40:17.005088 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 10:40:17 crc kubenswrapper[4926]: I1122 10:40:17.005125 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 10:40:17 crc kubenswrapper[4926]: I1122 10:40:17.005154 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 10:40:17 crc kubenswrapper[4926]: I1122 10:40:17.005175 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:17Z","lastTransitionTime":"2025-11-22T10:40:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 10:40:17 crc kubenswrapper[4926]: I1122 10:40:17.107837 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 10:40:17 crc kubenswrapper[4926]: I1122 10:40:17.107881 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 10:40:17 crc kubenswrapper[4926]: I1122 10:40:17.107930 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 10:40:17 crc kubenswrapper[4926]: I1122 10:40:17.107953 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 10:40:17 crc kubenswrapper[4926]: I1122 10:40:17.107969 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:17Z","lastTransitionTime":"2025-11-22T10:40:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 10:40:17 crc kubenswrapper[4926]: I1122 10:40:17.211550 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 10:40:17 crc kubenswrapper[4926]: I1122 10:40:17.211627 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 10:40:17 crc kubenswrapper[4926]: I1122 10:40:17.211648 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 10:40:17 crc kubenswrapper[4926]: I1122 10:40:17.211675 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 10:40:17 crc kubenswrapper[4926]: I1122 10:40:17.211702 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:17Z","lastTransitionTime":"2025-11-22T10:40:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 10:40:17 crc kubenswrapper[4926]: I1122 10:40:17.314727 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 10:40:17 crc kubenswrapper[4926]: I1122 10:40:17.314798 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 10:40:17 crc kubenswrapper[4926]: I1122 10:40:17.314821 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 10:40:17 crc kubenswrapper[4926]: I1122 10:40:17.314849 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 10:40:17 crc kubenswrapper[4926]: I1122 10:40:17.314870 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:17Z","lastTransitionTime":"2025-11-22T10:40:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/.
Has your network provider started?"}
Nov 22 10:40:17 crc kubenswrapper[4926]: I1122 10:40:17.417764 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 10:40:17 crc kubenswrapper[4926]: I1122 10:40:17.418145 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 10:40:17 crc kubenswrapper[4926]: I1122 10:40:17.418334 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 10:40:17 crc kubenswrapper[4926]: I1122 10:40:17.418580 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 10:40:17 crc kubenswrapper[4926]: I1122 10:40:17.418711 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:17Z","lastTransitionTime":"2025-11-22T10:40:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 10:40:17 crc kubenswrapper[4926]: I1122 10:40:17.522109 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 10:40:17 crc kubenswrapper[4926]: I1122 10:40:17.522163 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 10:40:17 crc kubenswrapper[4926]: I1122 10:40:17.522179 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 10:40:17 crc kubenswrapper[4926]: I1122 10:40:17.522204 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 10:40:17 crc kubenswrapper[4926]: I1122 10:40:17.522223 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:17Z","lastTransitionTime":"2025-11-22T10:40:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 10:40:17 crc kubenswrapper[4926]: I1122 10:40:17.581649 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 22 10:40:17 crc kubenswrapper[4926]: E1122 10:40:17.581838 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 22 10:40:17 crc kubenswrapper[4926]: I1122 10:40:17.625564 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 10:40:17 crc kubenswrapper[4926]: I1122 10:40:17.625635 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 10:40:17 crc kubenswrapper[4926]: I1122 10:40:17.625654 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 10:40:17 crc kubenswrapper[4926]: I1122 10:40:17.625677 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 10:40:17 crc kubenswrapper[4926]: I1122 10:40:17.625694 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:17Z","lastTransitionTime":"2025-11-22T10:40:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 10:40:17 crc kubenswrapper[4926]: I1122 10:40:17.728526 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 10:40:17 crc kubenswrapper[4926]: I1122 10:40:17.728931 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 10:40:17 crc kubenswrapper[4926]: I1122 10:40:17.729170 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 10:40:17 crc kubenswrapper[4926]: I1122 10:40:17.729500 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 10:40:17 crc kubenswrapper[4926]: I1122 10:40:17.730035 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:17Z","lastTransitionTime":"2025-11-22T10:40:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 10:40:17 crc kubenswrapper[4926]: I1122 10:40:17.834084 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 10:40:17 crc kubenswrapper[4926]: I1122 10:40:17.834526 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 10:40:17 crc kubenswrapper[4926]: I1122 10:40:17.834705 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 10:40:17 crc kubenswrapper[4926]: I1122 10:40:17.834945 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 10:40:17 crc kubenswrapper[4926]: I1122 10:40:17.835170 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:17Z","lastTransitionTime":"2025-11-22T10:40:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 10:40:17 crc kubenswrapper[4926]: I1122 10:40:17.938462 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 10:40:17 crc kubenswrapper[4926]: I1122 10:40:17.938509 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 10:40:17 crc kubenswrapper[4926]: I1122 10:40:17.938522 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 10:40:17 crc kubenswrapper[4926]: I1122 10:40:17.938541 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 10:40:17 crc kubenswrapper[4926]: I1122 10:40:17.938554 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:17Z","lastTransitionTime":"2025-11-22T10:40:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 10:40:18 crc kubenswrapper[4926]: I1122 10:40:18.041276 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 10:40:18 crc kubenswrapper[4926]: I1122 10:40:18.041352 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 10:40:18 crc kubenswrapper[4926]: I1122 10:40:18.041373 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 10:40:18 crc kubenswrapper[4926]: I1122 10:40:18.041399 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 10:40:18 crc kubenswrapper[4926]: I1122 10:40:18.041416 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:18Z","lastTransitionTime":"2025-11-22T10:40:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 10:40:18 crc kubenswrapper[4926]: I1122 10:40:18.140068 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/c42b6f47-b1a4-4fee-8681-3b5288370323-metrics-certs\") pod \"network-metrics-daemon-jfbf4\" (UID: \"c42b6f47-b1a4-4fee-8681-3b5288370323\") " pod="openshift-multus/network-metrics-daemon-jfbf4"
Nov 22 10:40:18 crc kubenswrapper[4926]: E1122 10:40:18.140240 4926 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered
Nov 22 10:40:18 crc kubenswrapper[4926]: E1122 10:40:18.140351 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/c42b6f47-b1a4-4fee-8681-3b5288370323-metrics-certs podName:c42b6f47-b1a4-4fee-8681-3b5288370323 nodeName:}" failed. No retries permitted until 2025-11-22 10:40:22.140324379 +0000 UTC m=+42.441929696 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/c42b6f47-b1a4-4fee-8681-3b5288370323-metrics-certs") pod "network-metrics-daemon-jfbf4" (UID: "c42b6f47-b1a4-4fee-8681-3b5288370323") : object "openshift-multus"/"metrics-daemon-secret" not registered
Nov 22 10:40:18 crc kubenswrapper[4926]: I1122 10:40:18.144293 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 10:40:18 crc kubenswrapper[4926]: I1122 10:40:18.144393 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 10:40:18 crc kubenswrapper[4926]: I1122 10:40:18.144410 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 10:40:18 crc kubenswrapper[4926]: I1122 10:40:18.144432 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 10:40:18 crc kubenswrapper[4926]: I1122 10:40:18.144451 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:18Z","lastTransitionTime":"2025-11-22T10:40:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 10:40:18 crc kubenswrapper[4926]: I1122 10:40:18.247425 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 10:40:18 crc kubenswrapper[4926]: I1122 10:40:18.247542 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 10:40:18 crc kubenswrapper[4926]: I1122 10:40:18.247623 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 10:40:18 crc kubenswrapper[4926]: I1122 10:40:18.247657 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 10:40:18 crc kubenswrapper[4926]: I1122 10:40:18.247680 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:18Z","lastTransitionTime":"2025-11-22T10:40:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 10:40:18 crc kubenswrapper[4926]: I1122 10:40:18.350967 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 10:40:18 crc kubenswrapper[4926]: I1122 10:40:18.351024 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 10:40:18 crc kubenswrapper[4926]: I1122 10:40:18.351039 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 10:40:18 crc kubenswrapper[4926]: I1122 10:40:18.351060 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 10:40:18 crc kubenswrapper[4926]: I1122 10:40:18.351075 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:18Z","lastTransitionTime":"2025-11-22T10:40:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 10:40:18 crc kubenswrapper[4926]: I1122 10:40:18.454346 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 10:40:18 crc kubenswrapper[4926]: I1122 10:40:18.454403 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 10:40:18 crc kubenswrapper[4926]: I1122 10:40:18.454437 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 10:40:18 crc kubenswrapper[4926]: I1122 10:40:18.454477 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 10:40:18 crc kubenswrapper[4926]: I1122 10:40:18.454501 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:18Z","lastTransitionTime":"2025-11-22T10:40:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/.
Has your network provider started?"}
Nov 22 10:40:18 crc kubenswrapper[4926]: I1122 10:40:18.557373 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 10:40:18 crc kubenswrapper[4926]: I1122 10:40:18.557444 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 10:40:18 crc kubenswrapper[4926]: I1122 10:40:18.557467 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 10:40:18 crc kubenswrapper[4926]: I1122 10:40:18.557493 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 10:40:18 crc kubenswrapper[4926]: I1122 10:40:18.557511 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:18Z","lastTransitionTime":"2025-11-22T10:40:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 10:40:18 crc kubenswrapper[4926]: I1122 10:40:18.581731 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 22 10:40:18 crc kubenswrapper[4926]: I1122 10:40:18.581957 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-jfbf4"
Nov 22 10:40:18 crc kubenswrapper[4926]: E1122 10:40:18.582158 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 22 10:40:18 crc kubenswrapper[4926]: I1122 10:40:18.582205 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 22 10:40:18 crc kubenswrapper[4926]: E1122 10:40:18.582378 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-jfbf4" podUID="c42b6f47-b1a4-4fee-8681-3b5288370323"
Nov 22 10:40:18 crc kubenswrapper[4926]: E1122 10:40:18.582574 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 22 10:40:18 crc kubenswrapper[4926]: I1122 10:40:18.660379 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 10:40:18 crc kubenswrapper[4926]: I1122 10:40:18.660460 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 10:40:18 crc kubenswrapper[4926]: I1122 10:40:18.660470 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 10:40:18 crc kubenswrapper[4926]: I1122 10:40:18.660490 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 10:40:18 crc kubenswrapper[4926]: I1122 10:40:18.660500 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:18Z","lastTransitionTime":"2025-11-22T10:40:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 10:40:18 crc kubenswrapper[4926]: I1122 10:40:18.764312 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 10:40:18 crc kubenswrapper[4926]: I1122 10:40:18.764361 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 10:40:18 crc kubenswrapper[4926]: I1122 10:40:18.764370 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 10:40:18 crc kubenswrapper[4926]: I1122 10:40:18.764388 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 10:40:18 crc kubenswrapper[4926]: I1122 10:40:18.764401 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:18Z","lastTransitionTime":"2025-11-22T10:40:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 10:40:18 crc kubenswrapper[4926]: I1122 10:40:18.867598 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 10:40:18 crc kubenswrapper[4926]: I1122 10:40:18.867672 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 10:40:18 crc kubenswrapper[4926]: I1122 10:40:18.867690 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 10:40:18 crc kubenswrapper[4926]: I1122 10:40:18.867716 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 10:40:18 crc kubenswrapper[4926]: I1122 10:40:18.867733 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:18Z","lastTransitionTime":"2025-11-22T10:40:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 10:40:18 crc kubenswrapper[4926]: I1122 10:40:18.971111 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 10:40:18 crc kubenswrapper[4926]: I1122 10:40:18.971559 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 10:40:18 crc kubenswrapper[4926]: I1122 10:40:18.971814 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 10:40:18 crc kubenswrapper[4926]: I1122 10:40:18.972072 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 10:40:18 crc kubenswrapper[4926]: I1122 10:40:18.972280 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:18Z","lastTransitionTime":"2025-11-22T10:40:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 10:40:19 crc kubenswrapper[4926]: I1122 10:40:19.074923 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 10:40:19 crc kubenswrapper[4926]: I1122 10:40:19.074986 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 10:40:19 crc kubenswrapper[4926]: I1122 10:40:19.075003 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 10:40:19 crc kubenswrapper[4926]: I1122 10:40:19.075028 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 10:40:19 crc kubenswrapper[4926]: I1122 10:40:19.075046 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:19Z","lastTransitionTime":"2025-11-22T10:40:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 10:40:19 crc kubenswrapper[4926]: I1122 10:40:19.178129 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 10:40:19 crc kubenswrapper[4926]: I1122 10:40:19.178187 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 10:40:19 crc kubenswrapper[4926]: I1122 10:40:19.178205 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 10:40:19 crc kubenswrapper[4926]: I1122 10:40:19.178227 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 10:40:19 crc kubenswrapper[4926]: I1122 10:40:19.178245 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:19Z","lastTransitionTime":"2025-11-22T10:40:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 10:40:19 crc kubenswrapper[4926]: I1122 10:40:19.280504 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 10:40:19 crc kubenswrapper[4926]: I1122 10:40:19.280938 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 10:40:19 crc kubenswrapper[4926]: I1122 10:40:19.281074 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 10:40:19 crc kubenswrapper[4926]: I1122 10:40:19.281202 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 10:40:19 crc kubenswrapper[4926]: I1122 10:40:19.281355 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:19Z","lastTransitionTime":"2025-11-22T10:40:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 10:40:19 crc kubenswrapper[4926]: I1122 10:40:19.384762 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 10:40:19 crc kubenswrapper[4926]: I1122 10:40:19.385141 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 10:40:19 crc kubenswrapper[4926]: I1122 10:40:19.385290 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 10:40:19 crc kubenswrapper[4926]: I1122 10:40:19.385442 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 10:40:19 crc kubenswrapper[4926]: I1122 10:40:19.385574 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:19Z","lastTransitionTime":"2025-11-22T10:40:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/.
Has your network provider started?"}
Nov 22 10:40:19 crc kubenswrapper[4926]: I1122 10:40:19.493657 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 10:40:19 crc kubenswrapper[4926]: I1122 10:40:19.493721 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 10:40:19 crc kubenswrapper[4926]: I1122 10:40:19.493743 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 10:40:19 crc kubenswrapper[4926]: I1122 10:40:19.493771 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 10:40:19 crc kubenswrapper[4926]: I1122 10:40:19.493793 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:19Z","lastTransitionTime":"2025-11-22T10:40:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 10:40:19 crc kubenswrapper[4926]: I1122 10:40:19.581999 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 22 10:40:19 crc kubenswrapper[4926]: E1122 10:40:19.582266 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 22 10:40:19 crc kubenswrapper[4926]: I1122 10:40:19.582434 4926 scope.go:117] "RemoveContainer" containerID="e4845e8b99f2def354e4a055d407495a578db4994aef6fb1c70b91760bbf4e2a"
Nov 22 10:40:19 crc kubenswrapper[4926]: I1122 10:40:19.596110 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 10:40:19 crc kubenswrapper[4926]: I1122 10:40:19.596345 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 10:40:19 crc kubenswrapper[4926]: I1122 10:40:19.596371 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 10:40:19 crc kubenswrapper[4926]: I1122 10:40:19.596399 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 10:40:19 crc kubenswrapper[4926]: I1122 10:40:19.596421 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:19Z","lastTransitionTime":"2025-11-22T10:40:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 10:40:19 crc kubenswrapper[4926]: I1122 10:40:19.700174 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 10:40:19 crc kubenswrapper[4926]: I1122 10:40:19.700238 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 10:40:19 crc kubenswrapper[4926]: I1122 10:40:19.700263 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 10:40:19 crc kubenswrapper[4926]: I1122 10:40:19.700294 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 10:40:19 crc kubenswrapper[4926]: I1122 10:40:19.700317 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:19Z","lastTransitionTime":"2025-11-22T10:40:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 10:40:19 crc kubenswrapper[4926]: I1122 10:40:19.803530 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 10:40:19 crc kubenswrapper[4926]: I1122 10:40:19.803590 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 10:40:19 crc kubenswrapper[4926]: I1122 10:40:19.803610 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 10:40:19 crc kubenswrapper[4926]: I1122 10:40:19.803638 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 10:40:19 crc kubenswrapper[4926]: I1122 10:40:19.803656 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:19Z","lastTransitionTime":"2025-11-22T10:40:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 10:40:19 crc kubenswrapper[4926]: I1122 10:40:19.898813 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/1.log"
Nov 22 10:40:19 crc kubenswrapper[4926]: I1122 10:40:19.901864 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"99cddd2a619c5ea76699afd981ea6b0ccab8df382e64f7f9b5399ac9091da32a"}
Nov 22 10:40:19 crc kubenswrapper[4926]: I1122 10:40:19.903129 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 22 10:40:19 crc kubenswrapper[4926]: I1122 10:40:19.907595 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 10:40:19 crc kubenswrapper[4926]: I1122 10:40:19.907655 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 10:40:19 crc kubenswrapper[4926]: I1122 10:40:19.907670 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 10:40:19 crc kubenswrapper[4926]: I1122 10:40:19.907713 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 10:40:19 crc kubenswrapper[4926]: I1122 10:40:19.907728 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:19Z","lastTransitionTime":"2025-11-22T10:40:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 10:40:19 crc kubenswrapper[4926]: I1122 10:40:19.916035 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-wqf9b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"385427b5-fb45-42dd-8ff8-a4c7cdad6157\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://06207f52a9095cf1830a6ad91952c7efa774aa15a2fca873dc09b6b995e26401\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4rrk8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:00Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-wqf9b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:19Z is after 2025-08-24T17:21:41Z"
Nov 22 10:40:19 crc kubenswrapper[4926]: I1122 10:40:19.935600 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2bff8ee1644ff7cd9bee279e5c641b5d9c55e0546fbc7d1f2330e4e86aee0c37\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:19Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:19 crc kubenswrapper[4926]: I1122 10:40:19.956368 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was 
deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:19Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:19 crc kubenswrapper[4926]: I1122 10:40:19.977305 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:19Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:19 crc kubenswrapper[4926]: I1122 10:40:19.989827 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-4sppr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f66b576d-83a6-4919-a918-7b075f35881e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a4a164ad2484a9c0b09c8e6b174448dedfe84a017ada69b19429d90621540080\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jwtrf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:01Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-4sppr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify 
certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:19Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:20 crc kubenswrapper[4926]: I1122 10:40:20.003916 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"19e2ae79-41cf-4653-aa92-cc410145852b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5b4679473c2c11fecbd8496e1cbeb15b3a2e3fff5e5b5f127e0d1f1d7b3e5b70\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8066b411b165bb6f3aa9fac7c4f7d5687e58fba56ff2afd14c7a351ba3877483\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://96a716433f83f8041fc5f29383937a1b49d73701286739632a5c07a7e4a42077\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\
\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ff433d4efb9f77a47f79a39aa609b09a40a642f554e19b8fce0c84ad96a5e34\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:39:40Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:20Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:20 crc kubenswrapper[4926]: I1122 10:40:20.010380 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:20 crc kubenswrapper[4926]: I1122 10:40:20.010435 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:20 crc kubenswrapper[4926]: I1122 10:40:20.010453 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:20 crc kubenswrapper[4926]: I1122 10:40:20.010477 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:20 crc kubenswrapper[4926]: I1122 10:40:20.010495 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:20Z","lastTransitionTime":"2025-11-22T10:40:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:40:20 crc kubenswrapper[4926]: I1122 10:40:20.021626 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:20Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:20 crc kubenswrapper[4926]: I1122 10:40:20.038707 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d4977b14-85c3-4141-9b15-1768f09e8d27\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7f4461b3d09c647d71476b2c089ab47e93fe0e0efb27f8179e476ff3667447f6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5nwn7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fb54559825bae40317f4dbc23c5b4238c4681f9845053656d59b2d2b3e504e1a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5nwn7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:00Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-xr9nd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:20Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:20 crc kubenswrapper[4926]: I1122 10:40:20.057509 4926 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-additional-cni-plugins-sr572" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fa95257d-7464-4038-b2f3-aa795e4ac425\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b2dd6b2a2fe640f28f973df40cf7a31e3cba962a0d2aec796c6fcfa3892adce1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0a7d49a66e96b6f535cae7cd657acf64164c67b52df055b133183ef25644863d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0a7d49a66e96b6f535cae7cd657acf64164c67b52df055b133183ef25644863d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://53833aaa04a836cea1e6539fd089b91d92bec1e9c1037d036a09fa83a95b45da\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2c
c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://53833aaa04a836cea1e6539fd089b91d92bec1e9c1037d036a09fa83a95b45da\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://80eeb7fb332eda9b9472092de548160297c4abdf76a929904fcf3321e992c230\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://80eeb7fb332eda9b9472092de548160297c4abdf76a929904fcf3321e992c230\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:40:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6293e4f193241ddbe2b4e93ceee670330e9e6cf86889fc9b049a7195da496a28\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6293e4f193241ddbe2b4e93ceee670330e9e6cf86889fc9b049a7195da496a28\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:40:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-re
lease\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6ffd06bbdeee7c23dde48a03a975b5a86b258d8f95281abd78c8ce459155bb3f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6ffd06bbdeee7c23dde48a03a975b5a86b258d8f95281abd78c8ce459155bb3f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:40:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ff5dbca8ebeaee6449c5eadf548e32151cc59875e405a574c1f2e0677b40bb19\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ff5dbca8ebeaee6449c5eadf548e32151cc59875e405a574c1f2e0677b40bb19\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:40:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:00Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-sr572\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:20Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:20 crc kubenswrapper[4926]: I1122 10:40:20.070308 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-jfbf4" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c42b6f47-b1a4-4fee-8681-3b5288370323\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:14Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:14Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:14Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cddxh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cddxh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:14Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-jfbf4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:20Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:20 crc kubenswrapper[4926]: I1122 10:40:20.085843 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8bc26a84-5939-4708-97f7-36fb5b1d1f27\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:40Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:40Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bdc617bc3d316106b3e66df8521ae9165a90df081b92c4a6c167895006a667a4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8bdb9d01c359b36d5ea1517197ab1db0746267b371b5aaf6ae9af7157af6f76\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f92968d6a164ebcee515ead5d8704ab04430b65f29cec21c10cecf2b99079737\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"m
ountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://99cddd2a619c5ea76699afd981ea6b0ccab8df382e64f7f9b5399ac9091da32a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e4845e8b99f2def354e4a055d407495a578db4994aef6fb1c70b91760bbf4e2a\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"message\\\":\\\"le observer\\\\nW1122 10:40:01.221686 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1122 10:40:01.221835 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1122 10:40:01.223195 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3742639324/tls.crt::/tmp/serving-cert-3742639324/tls.key\\\\\\\"\\\\nI1122 10:40:01.845350 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1122 10:40:01.848213 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1122 10:40:01.848231 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1122 10:40:01.848251 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1122 10:40:01.848258 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1122 10:40:01.855383 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1122 10:40:01.855412 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1122 10:40:01.855418 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1122 10:40:01.855423 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nI1122 10:40:01.855441 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1122 10:40:01.855456 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1122 10:40:01.855461 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1122 10:40:01.855464 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1122 10:40:01.857125 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T10:39:55Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://59001d5969924d417da69f7b385e8ee904bb5289914c6f4a58156a80f2553abd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:43Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e4fdad471fe7d7332fa8c1fc1e4779b934b140f9c808e0af44f05808c56b3e42\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e4fdad471fe7d7332fa8c1fc1e4779b934b140f9c808e0af44f05808c56b3e42\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:39:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:39:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:39:40Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:20Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:20 crc kubenswrapper[4926]: I1122 10:40:20.113466 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:20 crc kubenswrapper[4926]: I1122 10:40:20.113510 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:20 crc kubenswrapper[4926]: I1122 10:40:20.113522 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:20 crc kubenswrapper[4926]: I1122 10:40:20.113539 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:20 crc kubenswrapper[4926]: I1122 10:40:20.113552 4926 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:20Z","lastTransitionTime":"2025-11-22T10:40:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:20 crc kubenswrapper[4926]: I1122 10:40:20.115084 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1f3bdd4c-e5c5-46f1-8f92-450d892ef2e2\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b68f6c08d1ac65e51054d798dcb1e6affbf5f114ea3848b544d3093168e44150\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://adceb29cfdba4f70773eb38f81b2291080a8a25e6d6ffe7fbbb037118e0ac29f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://29acb771444281287208f7c5aecfe78ed6e70fe2940be31d423c011878bee6c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/open
shift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c37c77ac89f0469ff68b3d5dffc2af7d7b6997b8975abe244e302baf0f5bbacc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ed65ad0463b5aa72d13756891af20691bff705a94c67a55d45f71af2a88b01e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2f4c96d7281eda14ad3971ee27f9f0f401d12ba8b72e32b65bd9d4935182f980\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2f4c96d7281eda14ad3971ee27f9f0f401d12ba8b72e32b65bd9d4935182f980\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:39:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:39:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ebad6d17a869f5f58a7c43300ceace96025b58ca9621cb6bc23b45d69fe471b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd
6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ebad6d17a869f5f58a7c43300ceace96025b58ca9621cb6bc23b45d69fe471b4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:39:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:39:42Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://6075bb148352742b787b1e8df58fe76d06db89303a7c297d07446148532e92cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6075bb148352742b787b1e8df58fe76d06db89303a7c297d07446148532e92cb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:39:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:39:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:39:40Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:20Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:20 crc kubenswrapper[4926]: I1122 10:40:20.134614 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://20ee112074738980326d231b1680aa74c920ec797dbfe4752ebfdfb99695caeb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0a5e6c7d3f4dd4bbfdc58c50672c5187d32b136fb9e096c95e786193e1c773b9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:20Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:20 crc kubenswrapper[4926]: I1122 10:40:20.152909 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-z69nr" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"25bc94bb-a5d1-431c-9847-2f6a02997e25\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://20393151c13ddc03e23c32092c70e09c99527d0dd7554c02fc9d2ef8f6e38ba4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://877601ea3a274df873ed3165ae9f9e407093527a55ca10bb9a1c6092312c1b58\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ecaafcef7f92f85be04341712725da108f6dc00c79d4fb72f683186ccaa7c7d7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f2e1c170bdf63c83df5049fbe46ef4de6e08838e961ed3c16904cb9572fdf52f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4dd9889bb8600e958757f70ba92e2e7742f9f4e8ce9bf9c11dc4a2fd4ba0aaa2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f647a7091a4d0acbf116e2583851b13ec4415004447202e892f2b24b7acf5e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c31c877de789dac072b3eff81eb5639254198dc2af1cc12ecc408c3272f8c2c5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c31c877de789dac072b3eff81eb5639254198dc2af1cc12ecc408c3272f8c2c5\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-22T10:40:11Z\\\",\\\"message\\\":\\\"to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:11Z is after 2025-08-24T17:21:41Z]\\\\nI1122 10:40:11.880425 6326 services_controller.go:473] Services do not match for network=default, existing lbs: []services.LB{services.LB{Name:\\\\\\\"Service_openshift-machine-api/machine-api-operator-webhook_TCP_cluster\\\\\\\", UUID:\\\\\\\"e4e4203e-87c7-4024-930a-5d6bdfe2bdde\\\\\\\", Protocol:\\\\\\\"tcp\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-machine-api/machine-api-operator-webhook\\\\\\\"}, Opts:services.LBOpts{Reject:false, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{}, Templates:services.TemplateMap{}, Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}, built lbs: []services.LB{services.LB{Name:\\\\\\\"Service_openshift-machine-api/machine-api-operator-webhook_TCP_cluster\\\\\\\", UUID:\\\\\\\"\\\\\\\", Protocol:\\\\\\\"TCP\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-machine-api/machine-api-operator-webhook\\\\\\\"}, Opts:services.LBOpts{Reject:true, EmptyLBEvents:false, AffinityTi\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:10Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=ovnkube-controller 
pod=ovnkube-node-z69nr_openshift-ovn-kubernetes(25bc94bb-a5d1-431c-9847-2f6a02997e25)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://19bceeb84e2cce918b75507fb0f9d5e4365fcd0d30da4491b83ed54b3c2d369a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fde011e559f073f261a07a3e55c05d88d1b11a16b6858b74cb8ef79b97c266b1\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fde011e559f073f261a07a3e55c05d88d1b11a16b6858b74cb8ef79b97c266b1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:00Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-z69nr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:20Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:20 crc kubenswrapper[4926]: I1122 10:40:20.167200 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-cqsd2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"40323bfa-0937-4581-a5f5-bbfe1de066eb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ed82455ff9e2721a5688857be775b66c293c17713ae1ce362da8eca9a0c52c24\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w5t6w
\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2335c0518813a704a36b683366f4b63c953afc0187416234b6682532298f80ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w5t6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:12Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-cqsd2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:20Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:20 crc kubenswrapper[4926]: I1122 10:40:20.183112 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-c6w2q" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"36de2843-6491-4c54-b624-c4a3d328c164\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://13dff3bd18b5adc45fb92f8207a6726011752f430271dabbae9f2e94011f1b08\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-52vzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:00Z\\\"}}\" for pod \"openshift-multus\"/\"multus-c6w2q\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:20Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:20 crc kubenswrapper[4926]: I1122 10:40:20.196459 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://136423f9eeb872be0b685dcfee2a81e5f4133ad09607dc53fdfa64d70e87ceab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:20Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:20 crc kubenswrapper[4926]: I1122 10:40:20.216395 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:20 crc kubenswrapper[4926]: I1122 10:40:20.216448 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:20 crc kubenswrapper[4926]: I1122 10:40:20.216460 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:20 crc kubenswrapper[4926]: I1122 10:40:20.216477 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:20 crc kubenswrapper[4926]: I1122 10:40:20.216491 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:20Z","lastTransitionTime":"2025-11-22T10:40:20Z","reason":"KubeletNotReady","message":"container runtime 
network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:20 crc kubenswrapper[4926]: I1122 10:40:20.319565 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:20 crc kubenswrapper[4926]: I1122 10:40:20.319617 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:20 crc kubenswrapper[4926]: I1122 10:40:20.319635 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:20 crc kubenswrapper[4926]: I1122 10:40:20.319657 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:20 crc kubenswrapper[4926]: I1122 10:40:20.319674 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:20Z","lastTransitionTime":"2025-11-22T10:40:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:20 crc kubenswrapper[4926]: I1122 10:40:20.421940 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:20 crc kubenswrapper[4926]: I1122 10:40:20.422005 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:20 crc kubenswrapper[4926]: I1122 10:40:20.422022 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:20 crc kubenswrapper[4926]: I1122 10:40:20.422047 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:20 crc kubenswrapper[4926]: I1122 10:40:20.422066 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:20Z","lastTransitionTime":"2025-11-22T10:40:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:40:20 crc kubenswrapper[4926]: I1122 10:40:20.526414 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:20 crc kubenswrapper[4926]: I1122 10:40:20.526465 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:20 crc kubenswrapper[4926]: I1122 10:40:20.526479 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:20 crc kubenswrapper[4926]: I1122 10:40:20.526499 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:20 crc kubenswrapper[4926]: I1122 10:40:20.526514 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:20Z","lastTransitionTime":"2025-11-22T10:40:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:20 crc kubenswrapper[4926]: I1122 10:40:20.532051 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:20 crc kubenswrapper[4926]: I1122 10:40:20.532092 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:20 crc kubenswrapper[4926]: I1122 10:40:20.532106 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:20 crc kubenswrapper[4926]: I1122 10:40:20.532122 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:20 crc kubenswrapper[4926]: I1122 10:40:20.532135 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:20Z","lastTransitionTime":"2025-11-22T10:40:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:40:20 crc kubenswrapper[4926]: E1122 10:40:20.549464 4926 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404548Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865348Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:40:20Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:20Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:40:20Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:20Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:40:20Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:20Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:40:20Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:20Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"aeb560ef-1eb5-4732-98e3-250b723cbd1b\\\",\\\"systemUUID\\\":\\\"16dcb71b-1e1f-4d77-bb74-17aa213c9052\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:20Z is after 
2025-08-24T17:21:41Z" Nov 22 10:40:20 crc kubenswrapper[4926]: I1122 10:40:20.554063 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:20 crc kubenswrapper[4926]: I1122 10:40:20.554110 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:20 crc kubenswrapper[4926]: I1122 10:40:20.554127 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:20 crc kubenswrapper[4926]: I1122 10:40:20.554144 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:20 crc kubenswrapper[4926]: I1122 10:40:20.554157 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:20Z","lastTransitionTime":"2025-11-22T10:40:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:20 crc kubenswrapper[4926]: E1122 10:40:20.567934 4926 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404548Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865348Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:40:20Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:20Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:40:20Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:20Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:40:20Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:20Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:40:20Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:20Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"aeb560ef-1eb5-4732-98e3-250b723cbd1b\\\",\\\"systemUUID\\\":\\\"16dcb71b-1e1f-4d77-bb74-17aa213c9052\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:20Z is after 
2025-08-24T17:21:41Z" Nov 22 10:40:20 crc kubenswrapper[4926]: I1122 10:40:20.570808 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:20 crc kubenswrapper[4926]: I1122 10:40:20.570839 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:20 crc kubenswrapper[4926]: I1122 10:40:20.570852 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:20 crc kubenswrapper[4926]: I1122 10:40:20.570869 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:20 crc kubenswrapper[4926]: I1122 10:40:20.570879 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:20Z","lastTransitionTime":"2025-11-22T10:40:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:20 crc kubenswrapper[4926]: I1122 10:40:20.581465 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 22 10:40:20 crc kubenswrapper[4926]: E1122 10:40:20.581571 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 22 10:40:20 crc kubenswrapper[4926]: I1122 10:40:20.581469 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-jfbf4" Nov 22 10:40:20 crc kubenswrapper[4926]: I1122 10:40:20.581465 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 10:40:20 crc kubenswrapper[4926]: E1122 10:40:20.581687 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-jfbf4" podUID="c42b6f47-b1a4-4fee-8681-3b5288370323" Nov 22 10:40:20 crc kubenswrapper[4926]: E1122 10:40:20.581749 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 22 10:40:20 crc kubenswrapper[4926]: E1122 10:40:20.585104 4926 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404548Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865348Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:40:20Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:20Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:40:20Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:20Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:40:20Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:20Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:40:20Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:20Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"aeb560ef-1eb5-4732-98e3-250b723cbd1b\\\",\\\"systemUUID\\\":\\\"16dcb71b-1e1f-4d77-bb74-17aa213c9052\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:20Z is after 
2025-08-24T17:21:41Z" Nov 22 10:40:20 crc kubenswrapper[4926]: I1122 10:40:20.588354 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:20 crc kubenswrapper[4926]: I1122 10:40:20.588392 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:20 crc kubenswrapper[4926]: I1122 10:40:20.588404 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:20 crc kubenswrapper[4926]: I1122 10:40:20.588422 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:20 crc kubenswrapper[4926]: I1122 10:40:20.588434 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:20Z","lastTransitionTime":"2025-11-22T10:40:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:20 crc kubenswrapper[4926]: I1122 10:40:20.594692 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d4977b14-85c3-4141-9b15-1768f09e8d27\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7f4461b3d09c647d71476b2c089ab47e93fe0e0efb27f8179e476ff3667447f6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5nwn7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fb54559825bae40317f4dbc23c5b4238c4681f9845053656d59b2d2b3e504e1a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e
95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5nwn7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:00Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-xr9nd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:20Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:20 crc kubenswrapper[4926]: E1122 10:40:20.599599 4926 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404548Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865348Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:40:20Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:20Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:40:20Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:20Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:40:20Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:20Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:40:20Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:20Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[... image list, nodeInfo, and runtimeHandlers elided: byte-for-byte identical to the first occurrence of this retried patch above ...]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:20Z is after 
2025-08-24T17:21:41Z" Nov 22 10:40:20 crc kubenswrapper[4926]: I1122 10:40:20.602689 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:20 crc kubenswrapper[4926]: I1122 10:40:20.602734 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:20 crc kubenswrapper[4926]: I1122 10:40:20.602772 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:20 crc kubenswrapper[4926]: I1122 10:40:20.602788 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:20 crc kubenswrapper[4926]: I1122 10:40:20.602800 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:20Z","lastTransitionTime":"2025-11-22T10:40:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:20 crc kubenswrapper[4926]: I1122 10:40:20.608150 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-sr572" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fa95257d-7464-4038-b2f3-aa795e4ac425\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b2dd6b2a2fe640f28f973df40cf7a31e3cba962a0d2aec796c6fcfa3892adce1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0a7d49a66e96b6f535cae7cd657acf64164c67b52df055b133183ef25644863d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db77
08c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0a7d49a66e96b6f535cae7cd657acf64164c67b52df055b133183ef25644863d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://53833aaa04a836cea1e6539fd089b91d92bec1e9c1037d036a09fa83a95b45da\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://53833aaa04a836cea1e6539fd089b91d92bec1e9c1037d036a09fa83a95b45da\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://80eeb7fb332eda9b9472092de548160297c4abdf76a929904fcf3321e992c230\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://80eeb7fb332eda9b9472092de548160297c4abdf76a929904fcf3321e992c230\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:40:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\"
:\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6293e4f193241ddbe2b4e93ceee670330e9e6cf86889fc9b049a7195da496a28\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6293e4f193241ddbe2b4e93ceee670330e9e6cf86889fc9b049a7195da496a28\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:40:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6ffd06bbdeee7c23dde48a03a975b5a86b258d8f95281abd78c8ce459155bb3f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6ffd06bbdeee7c23dde48a03a975b5a86b258d8f95281abd78c8ce459155bb3f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:40:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ff5dbca8ebeaee6449c5eadf548e32151cc59875e405a574c1f2e0677b40bb19\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ff5dbca8ebeaee6449c5eadf548e32151cc59875e405a574c1f2e0677b40bb19\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\
\\"2025-11-22T10:40:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:00Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-sr572\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:20Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:20 crc kubenswrapper[4926]: E1122 10:40:20.616513 4926 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404548Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865348Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:40:20Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:20Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:40:20Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:20Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:40:20Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:20Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:40:20Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:20Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[... image list, nodeInfo, and runtimeHandlers elided: byte-for-byte identical to the first occurrence of this retried patch above ...]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:20Z is after 
2025-08-24T17:21:41Z" Nov 22 10:40:20 crc kubenswrapper[4926]: E1122 10:40:20.616646 4926 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 22 10:40:20 crc kubenswrapper[4926]: I1122 10:40:20.620481 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-jfbf4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c42b6f47-b1a4-4fee-8681-3b5288370323\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:14Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:14Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:14Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cddxh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cddxh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:14Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-jfbf4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:20Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:20 crc kubenswrapper[4926]: I1122 10:40:20.628863 4926 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:20 crc kubenswrapper[4926]: I1122 10:40:20.628930 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:20 crc kubenswrapper[4926]: I1122 10:40:20.628946 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:20 crc kubenswrapper[4926]: I1122 10:40:20.628966 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:20 crc kubenswrapper[4926]: I1122 10:40:20.628980 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:20Z","lastTransitionTime":"2025-11-22T10:40:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:20 crc kubenswrapper[4926]: I1122 10:40:20.634829 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8bc26a84-5939-4708-97f7-36fb5b1d1f27\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:40Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:40Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bdc617bc3d316106b3e66df8521ae9165a90df081b92c4a6c167895006a667a4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8bdb9d01c359b36d5ea1517197ab1db0746267b371b5aaf6ae9af7157af6f76\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f92968d6a164ebcee515ead5d8704ab04430b65f29cec21c10cecf2b99079737\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://99cddd2a619c5ea76699afd981ea6b0ccab8df382e64f7f9b5399ac9091da32a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e4845e8b99f2def354e4a055d407495a578db4994aef6fb1c70b91760bbf4e2a\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"message\\\":\\\"le observer\\\\nW1122 10:40:01.221686 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1122 10:40:01.221835 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1122 10:40:01.223195 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3742639324/tls.crt::/tmp/serving-cert-3742639324/tls.key\\\\\\\"\\\\nI1122 10:40:01.845350 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1122 10:40:01.848213 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1122 10:40:01.848231 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1122 10:40:01.848251 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1122 10:40:01.848258 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1122 10:40:01.855383 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1122 10:40:01.855412 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1122 10:40:01.855418 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1122 10:40:01.855423 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nI1122 10:40:01.855441 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1122 10:40:01.855456 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1122 10:40:01.855461 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1122 10:40:01.855464 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1122 10:40:01.857125 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T10:39:55Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://59001d5969924d417da69f7b385e8ee904bb5289914c6f4a58156a80f2553abd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:43Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e4fdad471fe7d7332fa8c1fc1e4779b934b140f9c808e0af44f05808c56b3e42\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e4fdad471fe7d7332fa8c1fc1e4779b934b140f9c808e0af44f05808c56b3e42\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:39:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:39:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:39:40Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:20Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:20 crc kubenswrapper[4926]: I1122 10:40:20.645906 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:20Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:20 crc kubenswrapper[4926]: I1122 10:40:20.663655 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-z69nr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"25bc94bb-a5d1-431c-9847-2f6a02997e25\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://20393151c13ddc03e23c32092c70e09c99527d0dd7554c02fc9d2ef8f6e38ba4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://877601ea3a274df873ed3165ae9f9e407093527a55ca10bb9a1c6092312c1b58\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ecaafcef7f92f85be04341712725da108f6dc00c79d4fb72f683186ccaa7c7d7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f2e1c170bdf63c83df5049fbe46ef4de6e08838e961ed3c16904cb9572fdf52f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4dd9889bb8600e958757f70ba92e2e7742f9f4e8ce9bf9c11dc4a2fd4ba0aaa2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f647a7091a4d0acbf116e2583851b13ec4415004447202e892f2b24b7acf5e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c31c877de789dac072b3eff81eb5639254198dc2
af1cc12ecc408c3272f8c2c5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c31c877de789dac072b3eff81eb5639254198dc2af1cc12ecc408c3272f8c2c5\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-22T10:40:11Z\\\",\\\"message\\\":\\\"to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:11Z is after 2025-08-24T17:21:41Z]\\\\nI1122 10:40:11.880425 6326 services_controller.go:473] Services do not match for network=default, existing lbs: []services.LB{services.LB{Name:\\\\\\\"Service_openshift-machine-api/machine-api-operator-webhook_TCP_cluster\\\\\\\", UUID:\\\\\\\"e4e4203e-87c7-4024-930a-5d6bdfe2bdde\\\\\\\", Protocol:\\\\\\\"tcp\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-machine-api/machine-api-operator-webhook\\\\\\\"}, Opts:services.LBOpts{Reject:false, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{}, Templates:services.TemplateMap{}, Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}, built lbs: []services.LB{services.LB{Name:\\\\\\\"Service_openshift-machine-api/machine-api-operator-webhook_TCP_cluster\\\\\\\", UUID:\\\\\\\"\\\\\\\", Protocol:\\\\\\\"TCP\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-machine-api/machine-api-operator-webhook\\\\\\\"}, Opts:services.LBOpts{Reject:true, EmptyLBEvents:false, AffinityTi\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:10Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=ovnkube-controller 
pod=ovnkube-node-z69nr_openshift-ovn-kubernetes(25bc94bb-a5d1-431c-9847-2f6a02997e25)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://19bceeb84e2cce918b75507fb0f9d5e4365fcd0d30da4491b83ed54b3c2d369a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fde011e559f073f261a07a3e55c05d88d1b11a16b6858b74cb8ef79b97c266b1\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fde011e559f073f261a07a3e55c05d88d1b11a16b6858b74cb8ef79b97c266b1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:00Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-z69nr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:20Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:20 crc kubenswrapper[4926]: I1122 10:40:20.678699 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-cqsd2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"40323bfa-0937-4581-a5f5-bbfe1de066eb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ed82455ff9e2721a5688857be775b66c293c17713ae1ce362da8eca9a0c52c24\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w5t6w
\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2335c0518813a704a36b683366f4b63c953afc0187416234b6682532298f80ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w5t6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:12Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-cqsd2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:20Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:20 crc kubenswrapper[4926]: I1122 10:40:20.697241 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1f3bdd4c-e5c5-46f1-8f92-450d892ef2e2\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b68f6c08d1ac65e51054d798dcb1e6affbf5f114ea3848b544d3093168e44150\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://adceb29cfdba4f70773eb38f81b2291080a8a25e6d6ffe7fbbb037118e0ac29f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://29acb771444281287208f7c5aecfe78ed6e70fe2940be31d423c011878bee6c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c37c77ac89f0469ff68b3d5dffc2af7d7b6997b
8975abe244e302baf0f5bbacc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ed65ad0463b5aa72d13756891af20691bff705a94c67a55d45f71af2a88b01e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2f4c96d7281eda14ad3971ee27f9f0f401d12ba8b72e32b65bd9d4935182f980\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2f4c96d7281eda14ad3971ee27f9f0f401d12ba8b72e32b65bd9d4935182f980\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:39:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:39:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ebad6d17a869f5f58a7c43300ceace96025b58ca9621cb6bc23b45d69fe471b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ebad6d17a869f5f58a7c43300ceace96025b58ca9621cb6bc23b45d69fe471b4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:39:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:39:42Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://6075bb148352742b787b1e8df58fe76d06db89303a7c297d07446148532e92cb\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6075bb148352742b787b1e8df58fe76d06db89303a7c297d07446148532e92cb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:39:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:39:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:39:40Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:20Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:20 crc kubenswrapper[4926]: I1122 10:40:20.708745 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://20ee112074738980326d231b1680aa74c920ec797dbfe4752ebfdfb99695caeb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0a5e6c7d3f4dd4bbfdc58c50672c5187d32b136fb9e096c95e786193e1c773b9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36
cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:20Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:20 crc kubenswrapper[4926]: I1122 10:40:20.721364 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://136423f9eeb872be0b685dcfee2a81e5f4133ad09607dc53fdfa64d70e87ceab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:20Z is after 2025-08-24T17:21:41Z" Nov 
22 10:40:20 crc kubenswrapper[4926]: I1122 10:40:20.732125 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:20 crc kubenswrapper[4926]: I1122 10:40:20.732178 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:20 crc kubenswrapper[4926]: I1122 10:40:20.732192 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:20 crc kubenswrapper[4926]: I1122 10:40:20.732209 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:20 crc kubenswrapper[4926]: I1122 10:40:20.732221 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:20Z","lastTransitionTime":"2025-11-22T10:40:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:20 crc kubenswrapper[4926]: I1122 10:40:20.735773 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-c6w2q" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"36de2843-6491-4c54-b624-c4a3d328c164\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://13dff3bd18b5adc45fb92f8207a6726011752f430271dabbae9f2e94011f1b08\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\
"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-52vzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:00Z\\\"}}\" for pod \"openshift-multus\"/\"multus-c6w2q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:20Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:20 crc kubenswrapper[4926]: I1122 10:40:20.748026 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:20Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:20 crc kubenswrapper[4926]: I1122 10:40:20.759178 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:20Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:20 crc kubenswrapper[4926]: I1122 10:40:20.770341 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-4sppr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f66b576d-83a6-4919-a918-7b075f35881e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a4a164ad2484a9c0b09c8e6b174448dedfe84a017ada69b19429d90621540080\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jwtrf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:01Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-4sppr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify 
certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:20Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:20 crc kubenswrapper[4926]: I1122 10:40:20.785828 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"19e2ae79-41cf-4653-aa92-cc410145852b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5b4679473c2c11fecbd8496e1cbeb15b3a2e3fff5e5b5f127e0d1f1d7b3e5b70\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8066b411b165bb6f3aa9fac7c4f7d5687e58fba56ff2afd14c7a351ba3877483\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://96a716433f83f8041fc5f29383937a1b49d73701286739632a5c07a7e4a42077\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\
\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ff433d4efb9f77a47f79a39aa609b09a40a642f554e19b8fce0c84ad96a5e34\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:39:40Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:20Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:20 crc kubenswrapper[4926]: I1122 10:40:20.797055 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-wqf9b" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"385427b5-fb45-42dd-8ff8-a4c7cdad6157\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://06207f52a9095cf1830a6ad91952c7efa774aa15a2fca873dc09b6b995e26401\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4rrk8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:00Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-wqf9b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:20Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:20 crc kubenswrapper[4926]: I1122 10:40:20.809792 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2bff8ee1644ff7cd9bee279e5c641b5d9c55e0546fbc7d1f2330e4e86aee0c37\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:20Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:20 crc kubenswrapper[4926]: I1122 10:40:20.835069 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:20 crc kubenswrapper[4926]: I1122 10:40:20.835106 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:20 crc kubenswrapper[4926]: I1122 10:40:20.835117 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:20 crc kubenswrapper[4926]: I1122 10:40:20.835133 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:20 crc kubenswrapper[4926]: I1122 10:40:20.835145 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:20Z","lastTransitionTime":"2025-11-22T10:40:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:40:20 crc kubenswrapper[4926]: I1122 10:40:20.937728 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:20 crc kubenswrapper[4926]: I1122 10:40:20.937798 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:20 crc kubenswrapper[4926]: I1122 10:40:20.937808 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:20 crc kubenswrapper[4926]: I1122 10:40:20.937844 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:20 crc kubenswrapper[4926]: I1122 10:40:20.937855 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:20Z","lastTransitionTime":"2025-11-22T10:40:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:21 crc kubenswrapper[4926]: I1122 10:40:21.040277 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:21 crc kubenswrapper[4926]: I1122 10:40:21.040309 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:21 crc kubenswrapper[4926]: I1122 10:40:21.040317 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:21 crc kubenswrapper[4926]: I1122 10:40:21.040331 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:21 crc kubenswrapper[4926]: I1122 10:40:21.040340 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:21Z","lastTransitionTime":"2025-11-22T10:40:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:21 crc kubenswrapper[4926]: I1122 10:40:21.143047 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:21 crc kubenswrapper[4926]: I1122 10:40:21.143109 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:21 crc kubenswrapper[4926]: I1122 10:40:21.143126 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:21 crc kubenswrapper[4926]: I1122 10:40:21.143153 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:21 crc kubenswrapper[4926]: I1122 10:40:21.143170 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:21Z","lastTransitionTime":"2025-11-22T10:40:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:40:21 crc kubenswrapper[4926]: I1122 10:40:21.246637 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:21 crc kubenswrapper[4926]: I1122 10:40:21.246960 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:21 crc kubenswrapper[4926]: I1122 10:40:21.247057 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:21 crc kubenswrapper[4926]: I1122 10:40:21.247175 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:21 crc kubenswrapper[4926]: I1122 10:40:21.247273 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:21Z","lastTransitionTime":"2025-11-22T10:40:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:21 crc kubenswrapper[4926]: I1122 10:40:21.351064 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:21 crc kubenswrapper[4926]: I1122 10:40:21.351142 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:21 crc kubenswrapper[4926]: I1122 10:40:21.351162 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:21 crc kubenswrapper[4926]: I1122 10:40:21.351189 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:21 crc kubenswrapper[4926]: I1122 10:40:21.351208 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:21Z","lastTransitionTime":"2025-11-22T10:40:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:21 crc kubenswrapper[4926]: I1122 10:40:21.454681 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:21 crc kubenswrapper[4926]: I1122 10:40:21.454739 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:21 crc kubenswrapper[4926]: I1122 10:40:21.454755 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:21 crc kubenswrapper[4926]: I1122 10:40:21.454778 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:21 crc kubenswrapper[4926]: I1122 10:40:21.454794 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:21Z","lastTransitionTime":"2025-11-22T10:40:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:40:21 crc kubenswrapper[4926]: I1122 10:40:21.557734 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:21 crc kubenswrapper[4926]: I1122 10:40:21.558085 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:21 crc kubenswrapper[4926]: I1122 10:40:21.558166 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:21 crc kubenswrapper[4926]: I1122 10:40:21.558245 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:21 crc kubenswrapper[4926]: I1122 10:40:21.558345 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:21Z","lastTransitionTime":"2025-11-22T10:40:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:21 crc kubenswrapper[4926]: I1122 10:40:21.581518 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 22 10:40:21 crc kubenswrapper[4926]: E1122 10:40:21.581682 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 22 10:40:21 crc kubenswrapper[4926]: I1122 10:40:21.662981 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:21 crc kubenswrapper[4926]: I1122 10:40:21.663020 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:21 crc kubenswrapper[4926]: I1122 10:40:21.663033 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:21 crc kubenswrapper[4926]: I1122 10:40:21.663048 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:21 crc kubenswrapper[4926]: I1122 10:40:21.663060 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:21Z","lastTransitionTime":"2025-11-22T10:40:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:40:21 crc kubenswrapper[4926]: I1122 10:40:21.766195 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:21 crc kubenswrapper[4926]: I1122 10:40:21.766631 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:21 crc kubenswrapper[4926]: I1122 10:40:21.766733 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:21 crc kubenswrapper[4926]: I1122 10:40:21.766844 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:21 crc kubenswrapper[4926]: I1122 10:40:21.766982 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:21Z","lastTransitionTime":"2025-11-22T10:40:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:21 crc kubenswrapper[4926]: I1122 10:40:21.871760 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:21 crc kubenswrapper[4926]: I1122 10:40:21.871822 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:21 crc kubenswrapper[4926]: I1122 10:40:21.871844 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:21 crc kubenswrapper[4926]: I1122 10:40:21.871869 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:21 crc kubenswrapper[4926]: I1122 10:40:21.871879 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:21Z","lastTransitionTime":"2025-11-22T10:40:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:21 crc kubenswrapper[4926]: I1122 10:40:21.974699 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:21 crc kubenswrapper[4926]: I1122 10:40:21.974741 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:21 crc kubenswrapper[4926]: I1122 10:40:21.974751 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:21 crc kubenswrapper[4926]: I1122 10:40:21.974764 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:21 crc kubenswrapper[4926]: I1122 10:40:21.974774 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:21Z","lastTransitionTime":"2025-11-22T10:40:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:40:22 crc kubenswrapper[4926]: I1122 10:40:22.077268 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:22 crc kubenswrapper[4926]: I1122 10:40:22.077334 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:22 crc kubenswrapper[4926]: I1122 10:40:22.077355 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:22 crc kubenswrapper[4926]: I1122 10:40:22.077385 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:22 crc kubenswrapper[4926]: I1122 10:40:22.077409 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:22Z","lastTransitionTime":"2025-11-22T10:40:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:22 crc kubenswrapper[4926]: I1122 10:40:22.180730 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:22 crc kubenswrapper[4926]: I1122 10:40:22.180785 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:22 crc kubenswrapper[4926]: I1122 10:40:22.180803 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:22 crc kubenswrapper[4926]: I1122 10:40:22.180826 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:22 crc kubenswrapper[4926]: I1122 10:40:22.180843 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:22Z","lastTransitionTime":"2025-11-22T10:40:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:22 crc kubenswrapper[4926]: I1122 10:40:22.182361 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/c42b6f47-b1a4-4fee-8681-3b5288370323-metrics-certs\") pod \"network-metrics-daemon-jfbf4\" (UID: \"c42b6f47-b1a4-4fee-8681-3b5288370323\") " pod="openshift-multus/network-metrics-daemon-jfbf4" Nov 22 10:40:22 crc kubenswrapper[4926]: E1122 10:40:22.182570 4926 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 22 10:40:22 crc kubenswrapper[4926]: E1122 10:40:22.182667 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/c42b6f47-b1a4-4fee-8681-3b5288370323-metrics-certs podName:c42b6f47-b1a4-4fee-8681-3b5288370323 nodeName:}" failed. No retries permitted until 2025-11-22 10:40:30.182639109 +0000 UTC m=+50.484244426 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/c42b6f47-b1a4-4fee-8681-3b5288370323-metrics-certs") pod "network-metrics-daemon-jfbf4" (UID: "c42b6f47-b1a4-4fee-8681-3b5288370323") : object "openshift-multus"/"metrics-daemon-secret" not registered Nov 22 10:40:22 crc kubenswrapper[4926]: I1122 10:40:22.283382 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:22 crc kubenswrapper[4926]: I1122 10:40:22.283538 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:22 crc kubenswrapper[4926]: I1122 10:40:22.283610 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:22 crc kubenswrapper[4926]: I1122 10:40:22.283639 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:22 crc kubenswrapper[4926]: I1122 10:40:22.283692 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:22Z","lastTransitionTime":"2025-11-22T10:40:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:22 crc kubenswrapper[4926]: I1122 10:40:22.386819 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:22 crc kubenswrapper[4926]: I1122 10:40:22.386929 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:22 crc kubenswrapper[4926]: I1122 10:40:22.386949 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:22 crc kubenswrapper[4926]: I1122 10:40:22.386975 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:22 crc kubenswrapper[4926]: I1122 10:40:22.386999 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:22Z","lastTransitionTime":"2025-11-22T10:40:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:40:22 crc kubenswrapper[4926]: I1122 10:40:22.490912 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:22 crc kubenswrapper[4926]: I1122 10:40:22.490978 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:22 crc kubenswrapper[4926]: I1122 10:40:22.490994 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:22 crc kubenswrapper[4926]: I1122 10:40:22.491019 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:22 crc kubenswrapper[4926]: I1122 10:40:22.491036 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:22Z","lastTransitionTime":"2025-11-22T10:40:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:22 crc kubenswrapper[4926]: I1122 10:40:22.581370 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-jfbf4" Nov 22 10:40:22 crc kubenswrapper[4926]: I1122 10:40:22.581388 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 10:40:22 crc kubenswrapper[4926]: E1122 10:40:22.581602 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-jfbf4" podUID="c42b6f47-b1a4-4fee-8681-3b5288370323" Nov 22 10:40:22 crc kubenswrapper[4926]: I1122 10:40:22.581436 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 22 10:40:22 crc kubenswrapper[4926]: E1122 10:40:22.581689 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 22 10:40:22 crc kubenswrapper[4926]: E1122 10:40:22.581984 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 22 10:40:22 crc kubenswrapper[4926]: I1122 10:40:22.593157 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:22 crc kubenswrapper[4926]: I1122 10:40:22.593186 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:22 crc kubenswrapper[4926]: I1122 10:40:22.593194 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:22 crc kubenswrapper[4926]: I1122 10:40:22.593208 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:22 crc kubenswrapper[4926]: I1122 10:40:22.593217 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:22Z","lastTransitionTime":"2025-11-22T10:40:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:22 crc kubenswrapper[4926]: I1122 10:40:22.695799 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:22 crc kubenswrapper[4926]: I1122 10:40:22.695853 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:22 crc kubenswrapper[4926]: I1122 10:40:22.695871 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:22 crc kubenswrapper[4926]: I1122 10:40:22.695929 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:22 crc kubenswrapper[4926]: I1122 10:40:22.695957 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:22Z","lastTransitionTime":"2025-11-22T10:40:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:40:22 crc kubenswrapper[4926]: I1122 10:40:22.799031 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:22 crc kubenswrapper[4926]: I1122 10:40:22.799084 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:22 crc kubenswrapper[4926]: I1122 10:40:22.799100 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:22 crc kubenswrapper[4926]: I1122 10:40:22.799123 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:22 crc kubenswrapper[4926]: I1122 10:40:22.799141 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:22Z","lastTransitionTime":"2025-11-22T10:40:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:22 crc kubenswrapper[4926]: I1122 10:40:22.902246 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:22 crc kubenswrapper[4926]: I1122 10:40:22.902313 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:22 crc kubenswrapper[4926]: I1122 10:40:22.902328 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:22 crc kubenswrapper[4926]: I1122 10:40:22.902350 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:22 crc kubenswrapper[4926]: I1122 10:40:22.902364 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:22Z","lastTransitionTime":"2025-11-22T10:40:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:23 crc kubenswrapper[4926]: I1122 10:40:23.005728 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:23 crc kubenswrapper[4926]: I1122 10:40:23.005785 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:23 crc kubenswrapper[4926]: I1122 10:40:23.005799 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:23 crc kubenswrapper[4926]: I1122 10:40:23.005821 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:23 crc kubenswrapper[4926]: I1122 10:40:23.005834 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:23Z","lastTransitionTime":"2025-11-22T10:40:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:40:23 crc kubenswrapper[4926]: I1122 10:40:23.108586 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:23 crc kubenswrapper[4926]: I1122 10:40:23.108629 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:23 crc kubenswrapper[4926]: I1122 10:40:23.108638 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:23 crc kubenswrapper[4926]: I1122 10:40:23.108657 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:23 crc kubenswrapper[4926]: I1122 10:40:23.108668 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:23Z","lastTransitionTime":"2025-11-22T10:40:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:23 crc kubenswrapper[4926]: I1122 10:40:23.210684 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:23 crc kubenswrapper[4926]: I1122 10:40:23.210735 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:23 crc kubenswrapper[4926]: I1122 10:40:23.210750 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:23 crc kubenswrapper[4926]: I1122 10:40:23.210766 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:23 crc kubenswrapper[4926]: I1122 10:40:23.210778 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:23Z","lastTransitionTime":"2025-11-22T10:40:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:23 crc kubenswrapper[4926]: I1122 10:40:23.313573 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:23 crc kubenswrapper[4926]: I1122 10:40:23.313626 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:23 crc kubenswrapper[4926]: I1122 10:40:23.313639 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:23 crc kubenswrapper[4926]: I1122 10:40:23.313654 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:23 crc kubenswrapper[4926]: I1122 10:40:23.313663 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:23Z","lastTransitionTime":"2025-11-22T10:40:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:40:23 crc kubenswrapper[4926]: I1122 10:40:23.416317 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:23 crc kubenswrapper[4926]: I1122 10:40:23.416377 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:23 crc kubenswrapper[4926]: I1122 10:40:23.416393 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:23 crc kubenswrapper[4926]: I1122 10:40:23.416415 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:23 crc kubenswrapper[4926]: I1122 10:40:23.416427 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:23Z","lastTransitionTime":"2025-11-22T10:40:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:23 crc kubenswrapper[4926]: I1122 10:40:23.519161 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:23 crc kubenswrapper[4926]: I1122 10:40:23.519214 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:23 crc kubenswrapper[4926]: I1122 10:40:23.519231 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:23 crc kubenswrapper[4926]: I1122 10:40:23.519252 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:23 crc kubenswrapper[4926]: I1122 10:40:23.519267 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:23Z","lastTransitionTime":"2025-11-22T10:40:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:23 crc kubenswrapper[4926]: I1122 10:40:23.581289 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 22 10:40:23 crc kubenswrapper[4926]: E1122 10:40:23.581505 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 22 10:40:23 crc kubenswrapper[4926]: I1122 10:40:23.621546 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:23 crc kubenswrapper[4926]: I1122 10:40:23.621578 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:23 crc kubenswrapper[4926]: I1122 10:40:23.621587 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:23 crc kubenswrapper[4926]: I1122 10:40:23.621601 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:23 crc kubenswrapper[4926]: I1122 10:40:23.621611 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:23Z","lastTransitionTime":"2025-11-22T10:40:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:23 crc kubenswrapper[4926]: I1122 10:40:23.723799 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:23 crc kubenswrapper[4926]: I1122 10:40:23.723845 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:23 crc kubenswrapper[4926]: I1122 10:40:23.723863 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:23 crc kubenswrapper[4926]: I1122 10:40:23.723879 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:23 crc kubenswrapper[4926]: I1122 10:40:23.723911 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:23Z","lastTransitionTime":"2025-11-22T10:40:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:40:23 crc kubenswrapper[4926]: I1122 10:40:23.827131 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:23 crc kubenswrapper[4926]: I1122 10:40:23.827198 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:23 crc kubenswrapper[4926]: I1122 10:40:23.827221 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:23 crc kubenswrapper[4926]: I1122 10:40:23.827249 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:23 crc kubenswrapper[4926]: I1122 10:40:23.827272 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:23Z","lastTransitionTime":"2025-11-22T10:40:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:23 crc kubenswrapper[4926]: I1122 10:40:23.929413 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:23 crc kubenswrapper[4926]: I1122 10:40:23.929470 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:23 crc kubenswrapper[4926]: I1122 10:40:23.929482 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:23 crc kubenswrapper[4926]: I1122 10:40:23.929498 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:23 crc kubenswrapper[4926]: I1122 10:40:23.929514 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:23Z","lastTransitionTime":"2025-11-22T10:40:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:24 crc kubenswrapper[4926]: I1122 10:40:24.031824 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:24 crc kubenswrapper[4926]: I1122 10:40:24.031870 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:24 crc kubenswrapper[4926]: I1122 10:40:24.031880 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:24 crc kubenswrapper[4926]: I1122 10:40:24.031922 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:24 crc kubenswrapper[4926]: I1122 10:40:24.031933 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:24Z","lastTransitionTime":"2025-11-22T10:40:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:40:24 crc kubenswrapper[4926]: I1122 10:40:24.135012 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:24 crc kubenswrapper[4926]: I1122 10:40:24.135106 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:24 crc kubenswrapper[4926]: I1122 10:40:24.135126 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:24 crc kubenswrapper[4926]: I1122 10:40:24.135148 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:24 crc kubenswrapper[4926]: I1122 10:40:24.135167 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:24Z","lastTransitionTime":"2025-11-22T10:40:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:24 crc kubenswrapper[4926]: I1122 10:40:24.237971 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:24 crc kubenswrapper[4926]: I1122 10:40:24.238029 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:24 crc kubenswrapper[4926]: I1122 10:40:24.238040 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:24 crc kubenswrapper[4926]: I1122 10:40:24.238058 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:24 crc kubenswrapper[4926]: I1122 10:40:24.238069 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:24Z","lastTransitionTime":"2025-11-22T10:40:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:24 crc kubenswrapper[4926]: I1122 10:40:24.340956 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:24 crc kubenswrapper[4926]: I1122 10:40:24.340996 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:24 crc kubenswrapper[4926]: I1122 10:40:24.341005 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:24 crc kubenswrapper[4926]: I1122 10:40:24.341020 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:24 crc kubenswrapper[4926]: I1122 10:40:24.341030 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:24Z","lastTransitionTime":"2025-11-22T10:40:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:40:24 crc kubenswrapper[4926]: I1122 10:40:24.443661 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:24 crc kubenswrapper[4926]: I1122 10:40:24.443713 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:24 crc kubenswrapper[4926]: I1122 10:40:24.443724 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:24 crc kubenswrapper[4926]: I1122 10:40:24.443744 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:24 crc kubenswrapper[4926]: I1122 10:40:24.443758 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:24Z","lastTransitionTime":"2025-11-22T10:40:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:24 crc kubenswrapper[4926]: I1122 10:40:24.546668 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:24 crc kubenswrapper[4926]: I1122 10:40:24.546728 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:24 crc kubenswrapper[4926]: I1122 10:40:24.546740 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:24 crc kubenswrapper[4926]: I1122 10:40:24.546756 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:24 crc kubenswrapper[4926]: I1122 10:40:24.546766 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:24Z","lastTransitionTime":"2025-11-22T10:40:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:24 crc kubenswrapper[4926]: I1122 10:40:24.582018 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 10:40:24 crc kubenswrapper[4926]: I1122 10:40:24.582091 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-jfbf4" Nov 22 10:40:24 crc kubenswrapper[4926]: I1122 10:40:24.582038 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 22 10:40:24 crc kubenswrapper[4926]: E1122 10:40:24.582212 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 22 10:40:24 crc kubenswrapper[4926]: E1122 10:40:24.582290 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-jfbf4" podUID="c42b6f47-b1a4-4fee-8681-3b5288370323" Nov 22 10:40:24 crc kubenswrapper[4926]: E1122 10:40:24.582521 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 22 10:40:24 crc kubenswrapper[4926]: I1122 10:40:24.649859 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:24 crc kubenswrapper[4926]: I1122 10:40:24.649962 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:24 crc kubenswrapper[4926]: I1122 10:40:24.649981 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:24 crc kubenswrapper[4926]: I1122 10:40:24.650010 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:24 crc kubenswrapper[4926]: I1122 10:40:24.650031 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:24Z","lastTransitionTime":"2025-11-22T10:40:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:24 crc kubenswrapper[4926]: I1122 10:40:24.754255 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:24 crc kubenswrapper[4926]: I1122 10:40:24.754343 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:24 crc kubenswrapper[4926]: I1122 10:40:24.754367 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:24 crc kubenswrapper[4926]: I1122 10:40:24.754398 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:24 crc kubenswrapper[4926]: I1122 10:40:24.754420 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:24Z","lastTransitionTime":"2025-11-22T10:40:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:40:24 crc kubenswrapper[4926]: I1122 10:40:24.857596 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:24 crc kubenswrapper[4926]: I1122 10:40:24.857657 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:24 crc kubenswrapper[4926]: I1122 10:40:24.857680 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:24 crc kubenswrapper[4926]: I1122 10:40:24.857711 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:24 crc kubenswrapper[4926]: I1122 10:40:24.857723 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:24Z","lastTransitionTime":"2025-11-22T10:40:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:24 crc kubenswrapper[4926]: I1122 10:40:24.960940 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:24 crc kubenswrapper[4926]: I1122 10:40:24.961025 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:24 crc kubenswrapper[4926]: I1122 10:40:24.961048 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:24 crc kubenswrapper[4926]: I1122 10:40:24.961076 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:24 crc kubenswrapper[4926]: I1122 10:40:24.961094 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:24Z","lastTransitionTime":"2025-11-22T10:40:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:25 crc kubenswrapper[4926]: I1122 10:40:25.063367 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:25 crc kubenswrapper[4926]: I1122 10:40:25.063402 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:25 crc kubenswrapper[4926]: I1122 10:40:25.063410 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:25 crc kubenswrapper[4926]: I1122 10:40:25.063422 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:25 crc kubenswrapper[4926]: I1122 10:40:25.063430 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:25Z","lastTransitionTime":"2025-11-22T10:40:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:40:25 crc kubenswrapper[4926]: I1122 10:40:25.166759 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:25 crc kubenswrapper[4926]: I1122 10:40:25.166844 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:25 crc kubenswrapper[4926]: I1122 10:40:25.166865 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:25 crc kubenswrapper[4926]: I1122 10:40:25.166926 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:25 crc kubenswrapper[4926]: I1122 10:40:25.166950 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:25Z","lastTransitionTime":"2025-11-22T10:40:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:25 crc kubenswrapper[4926]: I1122 10:40:25.274396 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:25 crc kubenswrapper[4926]: I1122 10:40:25.274467 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:25 crc kubenswrapper[4926]: I1122 10:40:25.274485 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:25 crc kubenswrapper[4926]: I1122 10:40:25.274509 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:25 crc kubenswrapper[4926]: I1122 10:40:25.274528 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:25Z","lastTransitionTime":"2025-11-22T10:40:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:25 crc kubenswrapper[4926]: I1122 10:40:25.377290 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:25 crc kubenswrapper[4926]: I1122 10:40:25.377348 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:25 crc kubenswrapper[4926]: I1122 10:40:25.377364 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:25 crc kubenswrapper[4926]: I1122 10:40:25.377387 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:25 crc kubenswrapper[4926]: I1122 10:40:25.377403 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:25Z","lastTransitionTime":"2025-11-22T10:40:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:40:25 crc kubenswrapper[4926]: I1122 10:40:25.480188 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:25 crc kubenswrapper[4926]: I1122 10:40:25.480240 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:25 crc kubenswrapper[4926]: I1122 10:40:25.480256 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:25 crc kubenswrapper[4926]: I1122 10:40:25.480278 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:25 crc kubenswrapper[4926]: I1122 10:40:25.480294 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:25Z","lastTransitionTime":"2025-11-22T10:40:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:25 crc kubenswrapper[4926]: I1122 10:40:25.580928 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 22 10:40:25 crc kubenswrapper[4926]: E1122 10:40:25.581151 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 22 10:40:25 crc kubenswrapper[4926]: I1122 10:40:25.582937 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:25 crc kubenswrapper[4926]: I1122 10:40:25.582989 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:25 crc kubenswrapper[4926]: I1122 10:40:25.583006 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:25 crc kubenswrapper[4926]: I1122 10:40:25.583027 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:25 crc kubenswrapper[4926]: I1122 10:40:25.583044 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:25Z","lastTransitionTime":"2025-11-22T10:40:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:40:25 crc kubenswrapper[4926]: I1122 10:40:25.686217 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:25 crc kubenswrapper[4926]: I1122 10:40:25.686278 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:25 crc kubenswrapper[4926]: I1122 10:40:25.686295 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:25 crc kubenswrapper[4926]: I1122 10:40:25.686319 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:25 crc kubenswrapper[4926]: I1122 10:40:25.686338 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:25Z","lastTransitionTime":"2025-11-22T10:40:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:25 crc kubenswrapper[4926]: I1122 10:40:25.789958 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:25 crc kubenswrapper[4926]: I1122 10:40:25.790021 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:25 crc kubenswrapper[4926]: I1122 10:40:25.790037 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:25 crc kubenswrapper[4926]: I1122 10:40:25.790060 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:25 crc kubenswrapper[4926]: I1122 10:40:25.790078 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:25Z","lastTransitionTime":"2025-11-22T10:40:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:25 crc kubenswrapper[4926]: I1122 10:40:25.894293 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:25 crc kubenswrapper[4926]: I1122 10:40:25.894364 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:25 crc kubenswrapper[4926]: I1122 10:40:25.894386 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:25 crc kubenswrapper[4926]: I1122 10:40:25.894415 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:25 crc kubenswrapper[4926]: I1122 10:40:25.894436 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:25Z","lastTransitionTime":"2025-11-22T10:40:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:40:25 crc kubenswrapper[4926]: I1122 10:40:25.998051 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:25 crc kubenswrapper[4926]: I1122 10:40:25.998124 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:25 crc kubenswrapper[4926]: I1122 10:40:25.998142 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:25 crc kubenswrapper[4926]: I1122 10:40:25.998171 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:25 crc kubenswrapper[4926]: I1122 10:40:25.998195 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:25Z","lastTransitionTime":"2025-11-22T10:40:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:26 crc kubenswrapper[4926]: I1122 10:40:26.101619 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:26 crc kubenswrapper[4926]: I1122 10:40:26.101705 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:26 crc kubenswrapper[4926]: I1122 10:40:26.101731 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:26 crc kubenswrapper[4926]: I1122 10:40:26.101763 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:26 crc kubenswrapper[4926]: I1122 10:40:26.101784 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:26Z","lastTransitionTime":"2025-11-22T10:40:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:26 crc kubenswrapper[4926]: I1122 10:40:26.205520 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:26 crc kubenswrapper[4926]: I1122 10:40:26.205587 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:26 crc kubenswrapper[4926]: I1122 10:40:26.205606 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:26 crc kubenswrapper[4926]: I1122 10:40:26.205630 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:26 crc kubenswrapper[4926]: I1122 10:40:26.205650 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:26Z","lastTransitionTime":"2025-11-22T10:40:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:40:26 crc kubenswrapper[4926]: I1122 10:40:26.308342 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:26 crc kubenswrapper[4926]: I1122 10:40:26.308429 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:26 crc kubenswrapper[4926]: I1122 10:40:26.308446 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:26 crc kubenswrapper[4926]: I1122 10:40:26.308476 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:26 crc kubenswrapper[4926]: I1122 10:40:26.308495 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:26Z","lastTransitionTime":"2025-11-22T10:40:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:26 crc kubenswrapper[4926]: I1122 10:40:26.412011 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:26 crc kubenswrapper[4926]: I1122 10:40:26.412073 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:26 crc kubenswrapper[4926]: I1122 10:40:26.412090 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:26 crc kubenswrapper[4926]: I1122 10:40:26.412113 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:26 crc kubenswrapper[4926]: I1122 10:40:26.412135 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:26Z","lastTransitionTime":"2025-11-22T10:40:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:26 crc kubenswrapper[4926]: I1122 10:40:26.515407 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:26 crc kubenswrapper[4926]: I1122 10:40:26.515480 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:26 crc kubenswrapper[4926]: I1122 10:40:26.515497 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:26 crc kubenswrapper[4926]: I1122 10:40:26.515525 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:26 crc kubenswrapper[4926]: I1122 10:40:26.515544 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:26Z","lastTransitionTime":"2025-11-22T10:40:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:40:26 crc kubenswrapper[4926]: I1122 10:40:26.581701 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 22 10:40:26 crc kubenswrapper[4926]: I1122 10:40:26.581832 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-jfbf4" Nov 22 10:40:26 crc kubenswrapper[4926]: E1122 10:40:26.581923 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 22 10:40:26 crc kubenswrapper[4926]: I1122 10:40:26.581990 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 10:40:26 crc kubenswrapper[4926]: E1122 10:40:26.582086 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-jfbf4" podUID="c42b6f47-b1a4-4fee-8681-3b5288370323" Nov 22 10:40:26 crc kubenswrapper[4926]: E1122 10:40:26.582164 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 22 10:40:26 crc kubenswrapper[4926]: I1122 10:40:26.619064 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:26 crc kubenswrapper[4926]: I1122 10:40:26.619132 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:26 crc kubenswrapper[4926]: I1122 10:40:26.619154 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:26 crc kubenswrapper[4926]: I1122 10:40:26.619181 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:26 crc kubenswrapper[4926]: I1122 10:40:26.619204 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:26Z","lastTransitionTime":"2025-11-22T10:40:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:40:26 crc kubenswrapper[4926]: I1122 10:40:26.721727 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:26 crc kubenswrapper[4926]: I1122 10:40:26.721783 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:26 crc kubenswrapper[4926]: I1122 10:40:26.721800 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:26 crc kubenswrapper[4926]: I1122 10:40:26.721823 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:26 crc kubenswrapper[4926]: I1122 10:40:26.721841 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:26Z","lastTransitionTime":"2025-11-22T10:40:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:26 crc kubenswrapper[4926]: I1122 10:40:26.825445 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:26 crc kubenswrapper[4926]: I1122 10:40:26.825586 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:26 crc kubenswrapper[4926]: I1122 10:40:26.825619 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:26 crc kubenswrapper[4926]: I1122 10:40:26.825650 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:26 crc kubenswrapper[4926]: I1122 10:40:26.825671 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:26Z","lastTransitionTime":"2025-11-22T10:40:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:26 crc kubenswrapper[4926]: I1122 10:40:26.928775 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:26 crc kubenswrapper[4926]: I1122 10:40:26.928845 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:26 crc kubenswrapper[4926]: I1122 10:40:26.928862 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:26 crc kubenswrapper[4926]: I1122 10:40:26.928914 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:26 crc kubenswrapper[4926]: I1122 10:40:26.928933 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:26Z","lastTransitionTime":"2025-11-22T10:40:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:40:27 crc kubenswrapper[4926]: I1122 10:40:27.032296 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:27 crc kubenswrapper[4926]: I1122 10:40:27.032371 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:27 crc kubenswrapper[4926]: I1122 10:40:27.032393 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:27 crc kubenswrapper[4926]: I1122 10:40:27.032422 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:27 crc kubenswrapper[4926]: I1122 10:40:27.032444 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:27Z","lastTransitionTime":"2025-11-22T10:40:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:27 crc kubenswrapper[4926]: I1122 10:40:27.136044 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:27 crc kubenswrapper[4926]: I1122 10:40:27.136105 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:27 crc kubenswrapper[4926]: I1122 10:40:27.136122 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:27 crc kubenswrapper[4926]: I1122 10:40:27.136149 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:27 crc kubenswrapper[4926]: I1122 10:40:27.136168 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:27Z","lastTransitionTime":"2025-11-22T10:40:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:27 crc kubenswrapper[4926]: I1122 10:40:27.239017 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:27 crc kubenswrapper[4926]: I1122 10:40:27.239091 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:27 crc kubenswrapper[4926]: I1122 10:40:27.239112 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:27 crc kubenswrapper[4926]: I1122 10:40:27.239142 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:27 crc kubenswrapper[4926]: I1122 10:40:27.239164 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:27Z","lastTransitionTime":"2025-11-22T10:40:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:40:27 crc kubenswrapper[4926]: I1122 10:40:27.342337 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:27 crc kubenswrapper[4926]: I1122 10:40:27.342964 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:27 crc kubenswrapper[4926]: I1122 10:40:27.342986 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:27 crc kubenswrapper[4926]: I1122 10:40:27.343004 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:27 crc kubenswrapper[4926]: I1122 10:40:27.343017 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:27Z","lastTransitionTime":"2025-11-22T10:40:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:27 crc kubenswrapper[4926]: I1122 10:40:27.446036 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:27 crc kubenswrapper[4926]: I1122 10:40:27.446106 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:27 crc kubenswrapper[4926]: I1122 10:40:27.446123 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:27 crc kubenswrapper[4926]: I1122 10:40:27.446149 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:27 crc kubenswrapper[4926]: I1122 10:40:27.446168 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:27Z","lastTransitionTime":"2025-11-22T10:40:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:27 crc kubenswrapper[4926]: I1122 10:40:27.549640 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:27 crc kubenswrapper[4926]: I1122 10:40:27.549704 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:27 crc kubenswrapper[4926]: I1122 10:40:27.549720 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:27 crc kubenswrapper[4926]: I1122 10:40:27.549743 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:27 crc kubenswrapper[4926]: I1122 10:40:27.549761 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:27Z","lastTransitionTime":"2025-11-22T10:40:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:40:27 crc kubenswrapper[4926]: I1122 10:40:27.581410 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 22 10:40:27 crc kubenswrapper[4926]: E1122 10:40:27.581606 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 22 10:40:27 crc kubenswrapper[4926]: I1122 10:40:27.652799 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:27 crc kubenswrapper[4926]: I1122 10:40:27.652864 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:27 crc kubenswrapper[4926]: I1122 10:40:27.652882 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:27 crc kubenswrapper[4926]: I1122 10:40:27.652941 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:27 crc kubenswrapper[4926]: I1122 10:40:27.652965 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:27Z","lastTransitionTime":"2025-11-22T10:40:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:27 crc kubenswrapper[4926]: I1122 10:40:27.756610 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:27 crc kubenswrapper[4926]: I1122 10:40:27.756683 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:27 crc kubenswrapper[4926]: I1122 10:40:27.756701 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:27 crc kubenswrapper[4926]: I1122 10:40:27.756725 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:27 crc kubenswrapper[4926]: I1122 10:40:27.756743 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:27Z","lastTransitionTime":"2025-11-22T10:40:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:40:27 crc kubenswrapper[4926]: I1122 10:40:27.860308 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:27 crc kubenswrapper[4926]: I1122 10:40:27.860370 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:27 crc kubenswrapper[4926]: I1122 10:40:27.860393 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:27 crc kubenswrapper[4926]: I1122 10:40:27.860424 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:27 crc kubenswrapper[4926]: I1122 10:40:27.860447 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:27Z","lastTransitionTime":"2025-11-22T10:40:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:27 crc kubenswrapper[4926]: I1122 10:40:27.963260 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:27 crc kubenswrapper[4926]: I1122 10:40:27.963336 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:27 crc kubenswrapper[4926]: I1122 10:40:27.963358 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:27 crc kubenswrapper[4926]: I1122 10:40:27.963385 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:27 crc kubenswrapper[4926]: I1122 10:40:27.963408 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:27Z","lastTransitionTime":"2025-11-22T10:40:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:28 crc kubenswrapper[4926]: I1122 10:40:28.066192 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:28 crc kubenswrapper[4926]: I1122 10:40:28.066608 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:28 crc kubenswrapper[4926]: I1122 10:40:28.066757 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:28 crc kubenswrapper[4926]: I1122 10:40:28.067006 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:28 crc kubenswrapper[4926]: I1122 10:40:28.067201 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:28Z","lastTransitionTime":"2025-11-22T10:40:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:40:28 crc kubenswrapper[4926]: I1122 10:40:28.170831 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:28 crc kubenswrapper[4926]: I1122 10:40:28.170934 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:28 crc kubenswrapper[4926]: I1122 10:40:28.170959 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:28 crc kubenswrapper[4926]: I1122 10:40:28.170988 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:28 crc kubenswrapper[4926]: I1122 10:40:28.171009 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:28Z","lastTransitionTime":"2025-11-22T10:40:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:28 crc kubenswrapper[4926]: I1122 10:40:28.273584 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:28 crc kubenswrapper[4926]: I1122 10:40:28.273636 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:28 crc kubenswrapper[4926]: I1122 10:40:28.273644 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:28 crc kubenswrapper[4926]: I1122 10:40:28.273656 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:28 crc kubenswrapper[4926]: I1122 10:40:28.273666 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:28Z","lastTransitionTime":"2025-11-22T10:40:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:28 crc kubenswrapper[4926]: I1122 10:40:28.376759 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:28 crc kubenswrapper[4926]: I1122 10:40:28.376804 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:28 crc kubenswrapper[4926]: I1122 10:40:28.376819 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:28 crc kubenswrapper[4926]: I1122 10:40:28.376835 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:28 crc kubenswrapper[4926]: I1122 10:40:28.376846 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:28Z","lastTransitionTime":"2025-11-22T10:40:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:40:28 crc kubenswrapper[4926]: I1122 10:40:28.478850 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:28 crc kubenswrapper[4926]: I1122 10:40:28.478977 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:28 crc kubenswrapper[4926]: I1122 10:40:28.479003 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:28 crc kubenswrapper[4926]: I1122 10:40:28.479033 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:28 crc kubenswrapper[4926]: I1122 10:40:28.479052 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:28Z","lastTransitionTime":"2025-11-22T10:40:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:28 crc kubenswrapper[4926]: I1122 10:40:28.581119 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-jfbf4" Nov 22 10:40:28 crc kubenswrapper[4926]: I1122 10:40:28.581145 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 10:40:28 crc kubenswrapper[4926]: E1122 10:40:28.581217 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-jfbf4" podUID="c42b6f47-b1a4-4fee-8681-3b5288370323" Nov 22 10:40:28 crc kubenswrapper[4926]: I1122 10:40:28.581520 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:28 crc kubenswrapper[4926]: I1122 10:40:28.581551 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:28 crc kubenswrapper[4926]: I1122 10:40:28.581566 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:28 crc kubenswrapper[4926]: I1122 10:40:28.581551 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 22 10:40:28 crc kubenswrapper[4926]: I1122 10:40:28.581588 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:28 crc kubenswrapper[4926]: I1122 10:40:28.581604 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:28Z","lastTransitionTime":"2025-11-22T10:40:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:40:28 crc kubenswrapper[4926]: E1122 10:40:28.581624 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 22 10:40:28 crc kubenswrapper[4926]: E1122 10:40:28.581673 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 22 10:40:28 crc kubenswrapper[4926]: I1122 10:40:28.582083 4926 scope.go:117] "RemoveContainer" containerID="c31c877de789dac072b3eff81eb5639254198dc2af1cc12ecc408c3272f8c2c5" Nov 22 10:40:28 crc kubenswrapper[4926]: I1122 10:40:28.683943 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:28 crc kubenswrapper[4926]: I1122 10:40:28.683995 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:28 crc kubenswrapper[4926]: I1122 10:40:28.684008 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:28 crc kubenswrapper[4926]: I1122 10:40:28.684026 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:28 crc kubenswrapper[4926]: I1122 10:40:28.684038 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:28Z","lastTransitionTime":"2025-11-22T10:40:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:28 crc kubenswrapper[4926]: I1122 10:40:28.786862 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:28 crc kubenswrapper[4926]: I1122 10:40:28.786947 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:28 crc kubenswrapper[4926]: I1122 10:40:28.786966 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:28 crc kubenswrapper[4926]: I1122 10:40:28.786992 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:28 crc kubenswrapper[4926]: I1122 10:40:28.787010 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:28Z","lastTransitionTime":"2025-11-22T10:40:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:40:28 crc kubenswrapper[4926]: I1122 10:40:28.890098 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:28 crc kubenswrapper[4926]: I1122 10:40:28.890134 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:28 crc kubenswrapper[4926]: I1122 10:40:28.890146 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:28 crc kubenswrapper[4926]: I1122 10:40:28.890169 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:28 crc kubenswrapper[4926]: I1122 10:40:28.890180 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:28Z","lastTransitionTime":"2025-11-22T10:40:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:28 crc kubenswrapper[4926]: I1122 10:40:28.936808 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-z69nr_25bc94bb-a5d1-431c-9847-2f6a02997e25/ovnkube-controller/1.log" Nov 22 10:40:28 crc kubenswrapper[4926]: I1122 10:40:28.945982 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-z69nr" event={"ID":"25bc94bb-a5d1-431c-9847-2f6a02997e25","Type":"ContainerStarted","Data":"f289e2c0a1949f60197796b92dd61a9192993575c83f759f2030ee0558635eaa"} Nov 22 10:40:28 crc kubenswrapper[4926]: I1122 10:40:28.947210 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-z69nr" Nov 22 10:40:28 crc kubenswrapper[4926]: I1122 10:40:28.973218 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://20ee112074738980326d231b1680aa74c920ec797dbfe4752ebfdfb99695caeb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0a5e6c7d3f4dd4bbfdc58c50672c5187d32b136fb9e096c95e786193e1c773b9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:28Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:28 crc kubenswrapper[4926]: I1122 10:40:28.992828 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:28 crc kubenswrapper[4926]: I1122 10:40:28.992860 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:28 crc kubenswrapper[4926]: I1122 10:40:28.992867 4926 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Nov 22 10:40:28 crc kubenswrapper[4926]: I1122 10:40:28.992879 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:28 crc kubenswrapper[4926]: I1122 10:40:28.992907 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:28Z","lastTransitionTime":"2025-11-22T10:40:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:28 crc kubenswrapper[4926]: I1122 10:40:28.998773 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-z69nr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"25bc94bb-a5d1-431c-9847-2f6a02997e25\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://20393151c13ddc03e23c32092c70e09c99527d0dd7554c02fc9d2ef8f6e38ba4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://877601ea3a274df873ed3165ae9f9e407093527a55ca10bb9a1c6092312c1b58\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ecaafcef7f92f85be04341712725da108f6dc00c79d4fb72f683186ccaa7c7d7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f2e1c170bdf63c83df5049fbe46ef4de6e08838e961ed3c16904cb9572fdf52f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4dd9889bb8600e958757f70ba92e2e7742f9f4e8ce9bf9c11dc4a2fd4ba0aaa2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f647a7091a4d0acbf116e2583851b13ec4415004447202e892f2b24b7acf5e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f289e2c0a1949f60197796b92dd61a9192993575
c83f759f2030ee0558635eaa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c31c877de789dac072b3eff81eb5639254198dc2af1cc12ecc408c3272f8c2c5\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-22T10:40:11Z\\\",\\\"message\\\":\\\"to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:11Z is after 2025-08-24T17:21:41Z]\\\\nI1122 10:40:11.880425 6326 services_controller.go:473] Services do not match for network=default, existing lbs: []services.LB{services.LB{Name:\\\\\\\"Service_openshift-machine-api/machine-api-operator-webhook_TCP_cluster\\\\\\\", UUID:\\\\\\\"e4e4203e-87c7-4024-930a-5d6bdfe2bdde\\\\\\\", Protocol:\\\\\\\"tcp\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-machine-api/machine-api-operator-webhook\\\\\\\"}, Opts:services.LBOpts{Reject:false, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{}, Templates:services.TemplateMap{}, Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}, built lbs: []services.LB{services.LB{Name:\\\\\\\"Service_openshift-machine-api/machine-api-operator-webhook_TCP_cluster\\\\\\\", UUID:\\\\\\\"\\\\\\\", Protocol:\\\\\\\"TCP\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-machine-api/machine-api-operator-webhook\\\\\\\"}, Opts:services.LBOpts{Reject:true, EmptyLBEvents:false, 
AffinityTi\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:10Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://19bceeb84e2cce918b75507fb0f9d5e4365fcd0d30da4491b83ed54b3c2d369a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[
{\\\"containerID\\\":\\\"cri-o://fde011e559f073f261a07a3e55c05d88d1b11a16b6858b74cb8ef79b97c266b1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fde011e559f073f261a07a3e55c05d88d1b11a16b6858b74cb8ef79b97c266b1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:00Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-z69nr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:28Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:29 crc kubenswrapper[4926]: I1122 10:40:29.045957 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-cqsd2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"40323bfa-0937-4581-a5f5-bbfe1de066eb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ed82455ff9e2721a5688857be775b66c293c17713ae1ce362da8eca9a0c52c24\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w5t6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2335c0518813a704a36b683366f4b63c953afc0187416234b6682532298f80ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w5t6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:12Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-cqsd2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:29Z is after 2025-08-24T17:21:41Z" Nov 22 
10:40:29 crc kubenswrapper[4926]: I1122 10:40:29.076510 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1f3bdd4c-e5c5-46f1-8f92-450d892ef2e2\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b68f6c08d1ac65e51054d798dcb1e6affbf5f114ea3848b544d3093168e44150\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://adceb29cfdba4f70773eb38f81b2291080a8a25e6d6ffe7fbbb037118e0ac29f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://29acb771444281287208f7c5aecfe78ed6e70fe2940be31d423c011878bee6c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"lo
g-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c37c77ac89f0469ff68b3d5dffc2af7d7b6997b8975abe244e302baf0f5bbacc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ed65ad0463b5aa72d13756891af20691bff705a94c67a55d45f71af2a88b01e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2f4c96d7281eda14ad3971ee27f9f0f401d12ba8b72e32b65bd9d4935182f980\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2f4c96d7281eda14ad3971ee27f9f0f401d12ba8b72e32b65bd9d4935182f980\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:39:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:39:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ebad6d17a869f5f58a7c43300ceace96025b58ca9621cb6bc23b45d69fe471b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ebad6d17a869f5f58a7c43300ceace96025b58ca9621cb6bc23b45d69fe471b4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:39:42Z\\\",\\\"reas
on\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:39:42Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://6075bb148352742b787b1e8df58fe76d06db89303a7c297d07446148532e92cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6075bb148352742b787b1e8df58fe76d06db89303a7c297d07446148532e92cb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:39:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:39:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:39:40Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:29Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:29 crc kubenswrapper[4926]: I1122 10:40:29.090794 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://136423f9eeb872be0b685dcfee2a81e5f4133ad09607dc53fdfa64d70e87ceab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:29Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:29 crc kubenswrapper[4926]: I1122 10:40:29.098584 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:29 crc kubenswrapper[4926]: I1122 10:40:29.098628 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:29 crc kubenswrapper[4926]: I1122 10:40:29.098639 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:29 crc kubenswrapper[4926]: I1122 10:40:29.098657 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:29 crc kubenswrapper[4926]: I1122 10:40:29.098668 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:29Z","lastTransitionTime":"2025-11-22T10:40:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:40:29 crc kubenswrapper[4926]: I1122 10:40:29.108866 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-c6w2q" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"36de2843-6491-4c54-b624-c4a3d328c164\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://13dff3bd18b5adc45fb92f8207a6726011752f430271dabbae9f2e94011f1b08\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-52vzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126
.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:00Z\\\"}}\" for pod \"openshift-multus\"/\"multus-c6w2q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:29Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:29 crc kubenswrapper[4926]: I1122 10:40:29.125948 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2bff8ee1644ff7cd9bee279e5c641b5d9c55e0546fbc7d1f2330e4e86aee0c37\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:29Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:29 crc kubenswrapper[4926]: I1122 10:40:29.137347 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready 
status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:29Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:29 crc kubenswrapper[4926]: I1122 10:40:29.148245 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:29Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:29 crc kubenswrapper[4926]: I1122 10:40:29.157568 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-4sppr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f66b576d-83a6-4919-a918-7b075f35881e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a4a164ad2484a9c0b09c8e6b174448dedfe84a017ada69b19429d90621540080\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jwtrf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:01Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-4sppr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify 
certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:29Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:29 crc kubenswrapper[4926]: I1122 10:40:29.168626 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"19e2ae79-41cf-4653-aa92-cc410145852b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5b4679473c2c11fecbd8496e1cbeb15b3a2e3fff5e5b5f127e0d1f1d7b3e5b70\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8066b411b165bb6f3aa9fac7c4f7d5687e58fba56ff2afd14c7a351ba3877483\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://96a716433f83f8041fc5f29383937a1b49d73701286739632a5c07a7e4a42077\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\
\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ff433d4efb9f77a47f79a39aa609b09a40a642f554e19b8fce0c84ad96a5e34\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:39:40Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:29Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:29 crc kubenswrapper[4926]: I1122 10:40:29.179200 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-wqf9b" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"385427b5-fb45-42dd-8ff8-a4c7cdad6157\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://06207f52a9095cf1830a6ad91952c7efa774aa15a2fca873dc09b6b995e26401\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4rrk8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:00Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-wqf9b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:29Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:29 crc kubenswrapper[4926]: I1122 10:40:29.189593 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:29Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:29 crc kubenswrapper[4926]: I1122 10:40:29.200547 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:29 crc kubenswrapper[4926]: I1122 10:40:29.200584 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:29 crc kubenswrapper[4926]: I1122 10:40:29.200592 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:29 crc kubenswrapper[4926]: I1122 10:40:29.200606 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:29 crc kubenswrapper[4926]: I1122 10:40:29.200614 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:29Z","lastTransitionTime":"2025-11-22T10:40:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:40:29 crc kubenswrapper[4926]: I1122 10:40:29.202063 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d4977b14-85c3-4141-9b15-1768f09e8d27\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7f4461b3d09c647d71476b2c089ab47e93fe0e0efb27f8179e476ff3667447f6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5nwn7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fb54559825bae40317f4dbc23c5b4238c4681f9845053656d59b2d2b3e504e1a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5nwn7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:00Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-xr9nd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:29Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:29 crc kubenswrapper[4926]: I1122 10:40:29.214862 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-sr572" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fa95257d-7464-4038-b2f3-aa795e4ac425\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b2dd6b2a2fe640f28f973df40cf7a31e3cba962a0d2aec796c6fcfa3892adce1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0a7d49a66e96b6f535cae7cd657acf64164c67b52df055b133183ef25644863d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0a7d49a66e96b6f535cae7cd657acf64164c67b52df055b133183ef25644863d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"read
Only\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://53833aaa04a836cea1e6539fd089b91d92bec1e9c1037d036a09fa83a95b45da\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://53833aaa04a836cea1e6539fd089b91d92bec1e9c1037d036a09fa83a95b45da\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://80eeb7fb332eda9b9472092de548160297c4abdf76a929904fcf3321e992c230\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://80eeb7fb332eda9b9472092de548160297c4abdf76a929904fcf3321e992c230\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:40:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6293e4f193241ddbe2b4e93ceee670330e9e6cf86889fc9b049a7195da496a28\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6293e4f193241ddbe2b4e93ceee670330e9e6cf86889fc9b049a7195da496a28\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:40:05Z\\\",\\\"reason\\\":\\\"Completed
\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6ffd06bbdeee7c23dde48a03a975b5a86b258d8f95281abd78c8ce459155bb3f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6ffd06bbdeee7c23dde48a03a975b5a86b258d8f95281abd78c8ce459155bb3f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:40:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ff5dbca8ebeaee6449c5eadf548e32151cc59875e405a574c1f2e0677b40bb19\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ff5dbca8ebeaee6449c5eadf548e32151cc59875e405a574c1f2e0677b40bb19\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:40:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:00Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-sr572\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:29Z is after 
2025-08-24T17:21:41Z" Nov 22 10:40:29 crc kubenswrapper[4926]: I1122 10:40:29.226019 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-jfbf4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c42b6f47-b1a4-4fee-8681-3b5288370323\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:14Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:14Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:14Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cddxh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cddxh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:14Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-jfbf4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:29Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:29 crc kubenswrapper[4926]: I1122 10:40:29.238937 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8bc26a84-5939-4708-97f7-36fb5b1d1f27\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:40Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:40Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bdc617bc3d316106b3e66df8521ae9165a90df081b92c4a6c167895006a667a4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8bdb9d01c359b36d5ea1517197ab1db0746267b371b5aaf6ae9af7157af6f76\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f92968d6a164ebcee515ead5d8704ab04430b65f29cec21c10cecf2b99079737\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"m
ountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://99cddd2a619c5ea76699afd981ea6b0ccab8df382e64f7f9b5399ac9091da32a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e4845e8b99f2def354e4a055d407495a578db4994aef6fb1c70b91760bbf4e2a\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"message\\\":\\\"le observer\\\\nW1122 10:40:01.221686 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1122 10:40:01.221835 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1122 10:40:01.223195 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3742639324/tls.crt::/tmp/serving-cert-3742639324/tls.key\\\\\\\"\\\\nI1122 10:40:01.845350 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1122 10:40:01.848213 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1122 10:40:01.848231 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1122 10:40:01.848251 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1122 10:40:01.848258 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1122 10:40:01.855383 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1122 10:40:01.855412 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1122 10:40:01.855418 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1122 10:40:01.855423 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nI1122 10:40:01.855441 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1122 10:40:01.855456 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1122 10:40:01.855461 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1122 10:40:01.855464 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1122 10:40:01.857125 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T10:39:55Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://59001d5969924d417da69f7b385e8ee904bb5289914c6f4a58156a80f2553abd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:43Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e4fdad471fe7d7332fa8c1fc1e4779b934b140f9c808e0af44f05808c56b3e42\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e4fdad471fe7d7332fa8c1fc1e4779b934b140f9c808e0af44f05808c56b3e42\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:39:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:39:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:39:40Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:29Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:29 crc kubenswrapper[4926]: I1122 10:40:29.302770 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:29 crc kubenswrapper[4926]: I1122 10:40:29.302806 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:29 crc kubenswrapper[4926]: I1122 10:40:29.302814 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:29 crc kubenswrapper[4926]: I1122 10:40:29.302828 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:29 crc kubenswrapper[4926]: I1122 10:40:29.302837 4926 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:29Z","lastTransitionTime":"2025-11-22T10:40:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:29 crc kubenswrapper[4926]: I1122 10:40:29.406381 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:29 crc kubenswrapper[4926]: I1122 10:40:29.406451 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:29 crc kubenswrapper[4926]: I1122 10:40:29.406465 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:29 crc kubenswrapper[4926]: I1122 10:40:29.406491 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:29 crc kubenswrapper[4926]: I1122 10:40:29.406507 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:29Z","lastTransitionTime":"2025-11-22T10:40:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:29 crc kubenswrapper[4926]: I1122 10:40:29.509802 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:29 crc kubenswrapper[4926]: I1122 10:40:29.509877 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:29 crc kubenswrapper[4926]: I1122 10:40:29.509909 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:29 crc kubenswrapper[4926]: I1122 10:40:29.509937 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:29 crc kubenswrapper[4926]: I1122 10:40:29.509951 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:29Z","lastTransitionTime":"2025-11-22T10:40:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:29 crc kubenswrapper[4926]: I1122 10:40:29.582021 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 22 10:40:29 crc kubenswrapper[4926]: E1122 10:40:29.582195 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 22 10:40:29 crc kubenswrapper[4926]: I1122 10:40:29.612710 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:29 crc kubenswrapper[4926]: I1122 10:40:29.612754 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:29 crc kubenswrapper[4926]: I1122 10:40:29.612763 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:29 crc kubenswrapper[4926]: I1122 10:40:29.612781 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:29 crc kubenswrapper[4926]: I1122 10:40:29.612795 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:29Z","lastTransitionTime":"2025-11-22T10:40:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:29 crc kubenswrapper[4926]: I1122 10:40:29.715257 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:29 crc kubenswrapper[4926]: I1122 10:40:29.715294 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:29 crc kubenswrapper[4926]: I1122 10:40:29.715305 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:29 crc kubenswrapper[4926]: I1122 10:40:29.715322 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:29 crc kubenswrapper[4926]: I1122 10:40:29.715335 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:29Z","lastTransitionTime":"2025-11-22T10:40:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:40:29 crc kubenswrapper[4926]: I1122 10:40:29.818257 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:29 crc kubenswrapper[4926]: I1122 10:40:29.818333 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:29 crc kubenswrapper[4926]: I1122 10:40:29.818349 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:29 crc kubenswrapper[4926]: I1122 10:40:29.818373 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:29 crc kubenswrapper[4926]: I1122 10:40:29.818390 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:29Z","lastTransitionTime":"2025-11-22T10:40:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:29 crc kubenswrapper[4926]: I1122 10:40:29.922327 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:29 crc kubenswrapper[4926]: I1122 10:40:29.922400 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:29 crc kubenswrapper[4926]: I1122 10:40:29.922419 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:29 crc kubenswrapper[4926]: I1122 10:40:29.922443 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:29 crc kubenswrapper[4926]: I1122 10:40:29.922460 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:29Z","lastTransitionTime":"2025-11-22T10:40:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:40:29 crc kubenswrapper[4926]: I1122 10:40:29.956286 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-z69nr_25bc94bb-a5d1-431c-9847-2f6a02997e25/ovnkube-controller/2.log" Nov 22 10:40:29 crc kubenswrapper[4926]: I1122 10:40:29.956760 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-z69nr_25bc94bb-a5d1-431c-9847-2f6a02997e25/ovnkube-controller/1.log" Nov 22 10:40:29 crc kubenswrapper[4926]: I1122 10:40:29.960105 4926 generic.go:334] "Generic (PLEG): container finished" podID="25bc94bb-a5d1-431c-9847-2f6a02997e25" containerID="f289e2c0a1949f60197796b92dd61a9192993575c83f759f2030ee0558635eaa" exitCode=1 Nov 22 10:40:29 crc kubenswrapper[4926]: I1122 10:40:29.960140 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-z69nr" event={"ID":"25bc94bb-a5d1-431c-9847-2f6a02997e25","Type":"ContainerDied","Data":"f289e2c0a1949f60197796b92dd61a9192993575c83f759f2030ee0558635eaa"} Nov 22 10:40:29 crc kubenswrapper[4926]: I1122 10:40:29.960217 4926 scope.go:117] "RemoveContainer" containerID="c31c877de789dac072b3eff81eb5639254198dc2af1cc12ecc408c3272f8c2c5" Nov 22 10:40:29 crc kubenswrapper[4926]: I1122 10:40:29.961436 4926 scope.go:117] "RemoveContainer" containerID="f289e2c0a1949f60197796b92dd61a9192993575c83f759f2030ee0558635eaa" Nov 22 10:40:29 crc kubenswrapper[4926]: E1122 10:40:29.961753 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-z69nr_openshift-ovn-kubernetes(25bc94bb-a5d1-431c-9847-2f6a02997e25)\"" pod="openshift-ovn-kubernetes/ovnkube-node-z69nr" podUID="25bc94bb-a5d1-431c-9847-2f6a02997e25" Nov 22 10:40:29 crc kubenswrapper[4926]: I1122 10:40:29.974326 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-wqf9b" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"385427b5-fb45-42dd-8ff8-a4c7cdad6157\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://06207f52a9095cf1830a6ad91952c7efa774aa15a2fca873dc09b6b995e26401\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4rrk8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:00Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-wqf9b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:29Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:29 crc kubenswrapper[4926]: I1122 10:40:29.987051 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2bff8ee1644ff7cd9bee279e5c641b5d9c55e0546fbc7d1f2330e4e86aee0c37\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:29Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:30 crc kubenswrapper[4926]: I1122 10:40:30.001135 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was 
deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:29Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:30 crc kubenswrapper[4926]: I1122 10:40:30.012674 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:30Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:30 crc kubenswrapper[4926]: I1122 10:40:30.022433 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-4sppr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f66b576d-83a6-4919-a918-7b075f35881e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a4a164ad2484a9c0b09c8e6b174448dedfe84a017ada69b19429d90621540080\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jwtrf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:01Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-4sppr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify 
certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:30Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:30 crc kubenswrapper[4926]: I1122 10:40:30.026644 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:30 crc kubenswrapper[4926]: I1122 10:40:30.026720 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:30 crc kubenswrapper[4926]: I1122 10:40:30.026737 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:30 crc kubenswrapper[4926]: I1122 10:40:30.026763 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:30 crc kubenswrapper[4926]: I1122 10:40:30.026778 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:30Z","lastTransitionTime":"2025-11-22T10:40:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:30 crc kubenswrapper[4926]: I1122 10:40:30.035761 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"19e2ae79-41cf-4653-aa92-cc410145852b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5b4679473c2c11fecbd8496e1cbeb15b3a2e3fff5e5b5f127e0d1f1d7b3e5b70\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8066b411b165bb6f3aa9fac7c4f7d5687e58fba56ff2afd14c7a351ba3877483\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35
825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://96a716433f83f8041fc5f29383937a1b49d73701286739632a5c07a7e4a42077\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ff433d4efb9f77a47f79a39aa609b09a40a642f554e19b8fce0c84ad96a5e34\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:39:40Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:30Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:30 crc kubenswrapper[4926]: I1122 10:40:30.053815 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:30Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:30 crc kubenswrapper[4926]: I1122 10:40:30.071053 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d4977b14-85c3-4141-9b15-1768f09e8d27\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7f4461b3d09c647d71476b2c089ab47e93fe0e0efb27f8179e476ff3667447f6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5nwn7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fb54559825bae40317f4dbc23c5b4238c4681f9845053656d59b2d2b3e504e1a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5nwn7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:00Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-xr9nd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:30Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:30 crc kubenswrapper[4926]: I1122 10:40:30.093149 4926 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-additional-cni-plugins-sr572" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fa95257d-7464-4038-b2f3-aa795e4ac425\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b2dd6b2a2fe640f28f973df40cf7a31e3cba962a0d2aec796c6fcfa3892adce1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0a7d49a66e96b6f535cae7cd657acf64164c67b52df055b133183ef25644863d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0a7d49a66e96b6f535cae7cd657acf64164c67b52df055b133183ef25644863d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://53833aaa04a836cea1e6539fd089b91d92bec1e9c1037d036a09fa83a95b45da\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2c
c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://53833aaa04a836cea1e6539fd089b91d92bec1e9c1037d036a09fa83a95b45da\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://80eeb7fb332eda9b9472092de548160297c4abdf76a929904fcf3321e992c230\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://80eeb7fb332eda9b9472092de548160297c4abdf76a929904fcf3321e992c230\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:40:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6293e4f193241ddbe2b4e93ceee670330e9e6cf86889fc9b049a7195da496a28\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6293e4f193241ddbe2b4e93ceee670330e9e6cf86889fc9b049a7195da496a28\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:40:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-re
lease\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6ffd06bbdeee7c23dde48a03a975b5a86b258d8f95281abd78c8ce459155bb3f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6ffd06bbdeee7c23dde48a03a975b5a86b258d8f95281abd78c8ce459155bb3f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:40:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ff5dbca8ebeaee6449c5eadf548e32151cc59875e405a574c1f2e0677b40bb19\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ff5dbca8ebeaee6449c5eadf548e32151cc59875e405a574c1f2e0677b40bb19\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:40:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:00Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-sr572\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:30Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:30 crc kubenswrapper[4926]: I1122 10:40:30.108683 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-jfbf4" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c42b6f47-b1a4-4fee-8681-3b5288370323\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:14Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:14Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:14Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cddxh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cddxh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:14Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-jfbf4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:30Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:30 crc kubenswrapper[4926]: I1122 10:40:30.130398 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:30 crc kubenswrapper[4926]: I1122 10:40:30.130444 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:30 crc kubenswrapper[4926]: I1122 10:40:30.130455 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientPID" Nov 22 10:40:30 crc kubenswrapper[4926]: I1122 10:40:30.130472 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:30 crc kubenswrapper[4926]: I1122 10:40:30.130486 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:30Z","lastTransitionTime":"2025-11-22T10:40:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:30 crc kubenswrapper[4926]: I1122 10:40:30.131532 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8bc26a84-5939-4708-97f7-36fb5b1d1f27\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:40Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:40Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bdc617bc3d316106b3e66df8521ae9165a90df081b92c4a6c167895006a667a4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8bdb9d01c359b36d5ea1517197ab1db0746267b371b5aaf6ae9af7157af6f76\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-
11-22T10:39:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f92968d6a164ebcee515ead5d8704ab04430b65f29cec21c10cecf2b99079737\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://99cddd2a619c5ea76699afd981ea6b0ccab8df382e64f7f9b5399ac9091da32a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e4845e8b99f2def354e4a055d407495a578db4994aef6fb1c70b91760bbf4e2a\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"message\\\":\\\"le observer\\\\nW1122 10:40:01.221686 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1122 10:40:01.221835 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1122 10:40:01.223195 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3742639324/tls.crt::/tmp/serving-cert-3742639324/tls.key\\\\\\\"\\\\nI1122 10:40:01.845350 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1122 10:40:01.848213 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1122 10:40:01.848231 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1122 10:40:01.848251 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1122 10:40:01.848258 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1122 10:40:01.855383 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1122 10:40:01.855412 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1122 10:40:01.855418 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1122 10:40:01.855423 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nI1122 10:40:01.855441 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1122 10:40:01.855456 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1122 10:40:01.855461 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' 
detected.\\\\nW1122 10:40:01.855464 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1122 10:40:01.857125 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T10:39:55Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://59001d5969924d417da69f7b385e8ee904bb5289914c6f4a58156a80f2553abd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:43Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e4fdad471fe7d7332fa8c1fc1e4779b934b140f9c808e0af44f05808c56b3e42\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e4fdad471fe7d7332fa8c1fc1e4779b934b140f9c808e0af44f05808c56b3e42\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:39:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:39:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:39:40Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:30Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:30 crc kubenswrapper[4926]: I1122 10:40:30.162233 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1f3bdd4c-e5c5-46f1-8f92-450d892ef2e2\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b68f6c08d1ac65e51054d798dcb1e6affbf5f114ea3848b544d3093168e44150\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://adceb29cfdba4f70773eb38f81b2291080a8a25e6d6ffe7fbbb037118e0ac29f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://29acb771444281287208f7c5aecfe78ed6e70fe2940be31d423c011878bee6c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c37c77ac89f0469ff68b3d5dffc2af7d7b6997b
8975abe244e302baf0f5bbacc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ed65ad0463b5aa72d13756891af20691bff705a94c67a55d45f71af2a88b01e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2f4c96d7281eda14ad3971ee27f9f0f401d12ba8b72e32b65bd9d4935182f980\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2f4c96d7281eda14ad3971ee27f9f0f401d12ba8b72e32b65bd9d4935182f980\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:39:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:39:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ebad6d17a869f5f58a7c43300ceace96025b58ca9621cb6bc23b45d69fe471b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ebad6d17a869f5f58a7c43300ceace96025b58ca9621cb6bc23b45d69fe471b4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:39:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:39:42Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://6075bb148352742b787b1e8df58fe76d06db89303a7c297d07446148532e92cb\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6075bb148352742b787b1e8df58fe76d06db89303a7c297d07446148532e92cb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:39:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:39:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:39:40Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:30Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:30 crc kubenswrapper[4926]: I1122 10:40:30.180928 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://20ee112074738980326d231b1680aa74c920ec797dbfe4752ebfdfb99695caeb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0a5e6c7d3f4dd4bbfdc58c50672c5187d32b136fb9e096c95e786193e1c773b9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36
cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:30Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:30 crc kubenswrapper[4926]: I1122 10:40:30.206601 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/c42b6f47-b1a4-4fee-8681-3b5288370323-metrics-certs\") pod \"network-metrics-daemon-jfbf4\" (UID: \"c42b6f47-b1a4-4fee-8681-3b5288370323\") " pod="openshift-multus/network-metrics-daemon-jfbf4" Nov 22 10:40:30 crc kubenswrapper[4926]: E1122 10:40:30.206802 4926 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 22 10:40:30 crc kubenswrapper[4926]: E1122 10:40:30.206946 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/c42b6f47-b1a4-4fee-8681-3b5288370323-metrics-certs podName:c42b6f47-b1a4-4fee-8681-3b5288370323 nodeName:}" failed. No retries permitted until 2025-11-22 10:40:46.206859732 +0000 UTC m=+66.508465059 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/c42b6f47-b1a4-4fee-8681-3b5288370323-metrics-certs") pod "network-metrics-daemon-jfbf4" (UID: "c42b6f47-b1a4-4fee-8681-3b5288370323") : object "openshift-multus"/"metrics-daemon-secret" not registered Nov 22 10:40:30 crc kubenswrapper[4926]: I1122 10:40:30.214496 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-z69nr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"25bc94bb-a5d1-431c-9847-2f6a02997e25\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://20393151c13ddc03e23c32092c70e09c99527d0dd7554c02fc9d2ef8f6e38ba4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://877601ea3a274df873ed3165ae9f9e407093527a55ca10bb9a1c6092312c1b58\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnl
y\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ecaafcef7f92f85be04341712725da108f6dc00c79d4fb72f683186ccaa7c7d7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f2e1c170bdf63c83df5049fbe46ef4de6e08838e961ed3c16904cb9572fdf52f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4dd9889bb8600e958757f70ba92e2e7742f9f4e8ce9bf9c11dc4a2fd4ba0aaa2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"contai
nerID\\\":\\\"cri-o://7f647a7091a4d0acbf116e2583851b13ec4415004447202e892f2b24b7acf5e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f289e2c0a1949f60197796b92dd61a9192993575c83f759f2030ee0558635eaa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c31c877de789dac072b3eff81eb5639254198dc2af1cc12ecc408c3272f8c2c5\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-22T10:40:11Z\\\",\\\"message\\\":\\\"to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:11Z is after 2025-08-24T17:21:41Z]\\\\nI1122 10:40:11.880425 6326 services_controller.go:473] Services do not match for network=default, existing lbs: []services.LB{services.LB{Name:\\\\\\\"Service_openshift-machine-api/machine-api-operator-webhook_TCP_cluster\\\\\\\", UUID:\\\\\\\"e4e4203e-87c7-4024-930a-5d6bdfe2bdde\\\\\\\", Protocol:\\\\\\\"tcp\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-machine-api/machine-api-operator-webhook\\\\\\\"}, Opts:services.LBOpts{Reject:false, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{}, Templates:services.TemplateMap{}, Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}, built lbs: []services.LB{services.LB{Name:\\\\\\\"Service_openshift-machine-api/machine-api-operator-webhook_TCP_cluster\\\\\\\", UUID:\\\\\\\"\\\\\\\", Protocol:\\\\\\\"TCP\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", 
\\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-machine-api/machine-api-operator-webhook\\\\\\\"}, Opts:services.LBOpts{Reject:true, EmptyLBEvents:false, AffinityTi\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:10Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f289e2c0a1949f60197796b92dd61a9192993575c83f759f2030ee0558635eaa\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-22T10:40:29Z\\\",\\\"message\\\":\\\"8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1122 10:40:29.517517 6569 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1122 10:40:29.517528 6569 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1122 10:40:29.517565 6569 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1122 10:40:29.517564 6569 handler.go:208] Removed *v1.Node event handler 2\\\\nI1122 10:40:29.517583 6569 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1122 10:40:29.517597 6569 handler.go:208] Removed *v1.Node event handler 7\\\\nI1122 10:40:29.517615 6569 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1122 10:40:29.517621 6569 handler.go:190] Sending *v1.Pod event handler 3 for removal\\\\nI1122 10:40:29.517625 6569 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI1122 10:40:29.517631 6569 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1122 10:40:29.517641 6569 handler.go:208] Removed *v1.Pod event handler 6\\\\nI1122 10:40:29.517653 6569 handler.go:208] Removed *v1.Pod event handler 3\\\\nI1122 10:40:29.517930 6569 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI1122 10:40:29.517976 6569 factory.go:656] Stopping watch factory\\\\nI1122 10:40:29.517989 6569 ovnkube.go:599] Stopped ovnkube\\\\nI1122 
1\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://19bceeb84e2cce918b75507fb0f9d5e4365fcd0d30da4491b83ed54b3c2d369a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fde011e559f073f261a07a3e55c05d88d1b11a16b6858b74cb8ef79b97c266b1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d209
9482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fde011e559f073f261a07a3e55c05d88d1b11a16b6858b74cb8ef79b97c266b1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:00Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-z69nr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:30Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:30 crc kubenswrapper[4926]: I1122 10:40:30.230807 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-cqsd2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"40323bfa-0937-4581-a5f5-bbfe1de066eb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ed82455ff9e2721a5688857be775b66c293c17713ae1ce362da8eca9a0c52c24\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w5t6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly
\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2335c0518813a704a36b683366f4b63c953afc0187416234b6682532298f80ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w5t6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:12Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-cqsd2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:30Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:30 crc kubenswrapper[4926]: I1122 10:40:30.234937 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:30 crc kubenswrapper[4926]: I1122 10:40:30.235094 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:30 crc kubenswrapper[4926]: I1122 10:40:30.235158 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:30 crc kubenswrapper[4926]: I1122 10:40:30.235195 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:30 crc kubenswrapper[4926]: I1122 10:40:30.235228 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:30Z","lastTransitionTime":"2025-11-22T10:40:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:40:30 crc kubenswrapper[4926]: I1122 10:40:30.249761 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-c6w2q" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"36de2843-6491-4c54-b624-c4a3d328c164\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://13dff3bd18b5adc45fb92f8207a6726011752f430271dabbae9f2e94011f1b08\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-52vzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126
.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:00Z\\\"}}\" for pod \"openshift-multus\"/\"multus-c6w2q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:30Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:30 crc kubenswrapper[4926]: I1122 10:40:30.266474 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://136423f9eeb872be0b685dcfee2a81e5f4133ad09607dc53fdfa64d70e87ceab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:30Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:30 crc kubenswrapper[4926]: I1122 10:40:30.338777 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:30 crc kubenswrapper[4926]: I1122 10:40:30.338868 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:30 crc kubenswrapper[4926]: I1122 10:40:30.338931 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:30 crc kubenswrapper[4926]: I1122 10:40:30.338964 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:30 crc kubenswrapper[4926]: I1122 10:40:30.338988 4926 setters.go:603] "Node became not 
ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:30Z","lastTransitionTime":"2025-11-22T10:40:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:30 crc kubenswrapper[4926]: I1122 10:40:30.441764 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:30 crc kubenswrapper[4926]: I1122 10:40:30.441826 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:30 crc kubenswrapper[4926]: I1122 10:40:30.441850 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:30 crc kubenswrapper[4926]: I1122 10:40:30.441872 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:30 crc kubenswrapper[4926]: I1122 10:40:30.441909 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:30Z","lastTransitionTime":"2025-11-22T10:40:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:30 crc kubenswrapper[4926]: I1122 10:40:30.546484 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:30 crc kubenswrapper[4926]: I1122 10:40:30.546553 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:30 crc kubenswrapper[4926]: I1122 10:40:30.546577 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:30 crc kubenswrapper[4926]: I1122 10:40:30.546609 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:30 crc kubenswrapper[4926]: I1122 10:40:30.546631 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:30Z","lastTransitionTime":"2025-11-22T10:40:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:30 crc kubenswrapper[4926]: I1122 10:40:30.581089 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 22 10:40:30 crc kubenswrapper[4926]: E1122 10:40:30.581309 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 22 10:40:30 crc kubenswrapper[4926]: I1122 10:40:30.581371 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 10:40:30 crc kubenswrapper[4926]: I1122 10:40:30.581462 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-jfbf4" Nov 22 10:40:30 crc kubenswrapper[4926]: E1122 10:40:30.581566 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 22 10:40:30 crc kubenswrapper[4926]: E1122 10:40:30.581642 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-jfbf4" podUID="c42b6f47-b1a4-4fee-8681-3b5288370323" Nov 22 10:40:30 crc kubenswrapper[4926]: I1122 10:40:30.607136 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"19e2ae79-41cf-4653-aa92-cc410145852b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5b4679473c2c11fecbd8496e1cbeb15b3a2e3fff5e5b5f127e0d1f1d7b3e5b70\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8066b411b165bb6f3aa9fac7c4f7d5687e58fba56ff2afd14c7a351ba3877483\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@s
ha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://96a716433f83f8041fc5f29383937a1b49d73701286739632a5c07a7e4a42077\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ff433d4efb9f77a47f79a39aa609b09a40a642f554e19b8fce0c84ad96a5e34\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:39:40Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:30Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:30 crc kubenswrapper[4926]: I1122 10:40:30.623670 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-wqf9b" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"385427b5-fb45-42dd-8ff8-a4c7cdad6157\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://06207f52a9095cf1830a6ad91952c7efa774aa15a2fca873dc09b6b995e26401\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4rrk8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:00Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-wqf9b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:30Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:30 crc kubenswrapper[4926]: I1122 10:40:30.644958 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2bff8ee1644ff7cd9bee279e5c641b5d9c55e0546fbc7d1f2330e4e86aee0c37\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:30Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:30 crc kubenswrapper[4926]: I1122 10:40:30.648632 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:30 crc kubenswrapper[4926]: I1122 10:40:30.648687 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:30 crc kubenswrapper[4926]: I1122 10:40:30.648700 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:30 crc kubenswrapper[4926]: I1122 10:40:30.648720 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:30 crc kubenswrapper[4926]: I1122 10:40:30.648733 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:30Z","lastTransitionTime":"2025-11-22T10:40:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:40:30 crc kubenswrapper[4926]: I1122 10:40:30.664511 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:30Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:30 crc kubenswrapper[4926]: I1122 10:40:30.679072 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:30Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:30 crc kubenswrapper[4926]: I1122 10:40:30.690084 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-4sppr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f66b576d-83a6-4919-a918-7b075f35881e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a4a164ad2484a9c0b09c8e6b174448dedfe84a017ada69b19429d90621540080\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jwtrf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\
\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:01Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-4sppr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:30Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:30 crc kubenswrapper[4926]: I1122 10:40:30.703178 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8bc26a84-5939-4708-97f7-36fb5b1d1f27\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:40Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:40Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bdc617bc3d316106b3e66df8521ae9165a90df081b92c4a6c167895006a667a4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8bdb9d01c359b36d5ea1517197ab1db0746267b371b5aaf6ae9af7157af6f76\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/
kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f92968d6a164ebcee515ead5d8704ab04430b65f29cec21c10cecf2b99079737\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://99cddd2a619c5ea76699afd981ea6b0ccab8df382e64f7f9b5399ac9091da32a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e4845e8b99f2def354e4a055d407495a578db4994aef6fb1c70b91760bbf4e2a\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"message\\\":\\\"le observer\\\\nW1122 10:40:01.221686 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1122 10:40:01.221835 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1122 10:40:01.223195 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3742639324/tls.crt::/tmp/serving-cert-3742639324/tls.key\\\\\\\"\\\\nI1122 10:40:01.845350 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1122 10:40:01.848213 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1122 10:40:01.848231 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1122 10:40:01.848251 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1122 10:40:01.848258 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1122 10:40:01.855383 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1122 10:40:01.855412 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1122 10:40:01.855418 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1122 10:40:01.855423 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nI1122 10:40:01.855441 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1122 10:40:01.855456 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1122 10:40:01.855461 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1122 10:40:01.855464 1 secure_serving.go:69] Use of insecure cipher 
'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1122 10:40:01.857125 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T10:39:55Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://59001d5969924d417da69f7b385e8ee904bb5289914c6f4a58156a80f2553abd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:43Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e4fdad471fe7d7332fa8c1fc1e4779b934b140f9c808e0af44f05808c56b3e42\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e4fdad471fe7d7332fa8c1fc1e4779b934b140f9c808e0af44f05808c56b3e42\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:39:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:39:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:39:40Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:30Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:30 crc kubenswrapper[4926]: I1122 10:40:30.714191 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:30Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:30 crc kubenswrapper[4926]: I1122 10:40:30.726477 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d4977b14-85c3-4141-9b15-1768f09e8d27\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7f4461b3d09c647d71476b2c089ab47e93fe0e0efb27f8179e476ff3667447f6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5nwn7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fb54559825bae40317f4dbc23c5b4238c4681f9845053656d59b2d2b3e504e1a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5nwn7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:00Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-xr9nd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:30Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:30 crc kubenswrapper[4926]: I1122 10:40:30.746331 4926 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-additional-cni-plugins-sr572" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fa95257d-7464-4038-b2f3-aa795e4ac425\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b2dd6b2a2fe640f28f973df40cf7a31e3cba962a0d2aec796c6fcfa3892adce1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0a7d49a66e96b6f535cae7cd657acf64164c67b52df055b133183ef25644863d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0a7d49a66e96b6f535cae7cd657acf64164c67b52df055b133183ef25644863d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://53833aaa04a836cea1e6539fd089b91d92bec1e9c1037d036a09fa83a95b45da\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2c
c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://53833aaa04a836cea1e6539fd089b91d92bec1e9c1037d036a09fa83a95b45da\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://80eeb7fb332eda9b9472092de548160297c4abdf76a929904fcf3321e992c230\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://80eeb7fb332eda9b9472092de548160297c4abdf76a929904fcf3321e992c230\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:40:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6293e4f193241ddbe2b4e93ceee670330e9e6cf86889fc9b049a7195da496a28\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6293e4f193241ddbe2b4e93ceee670330e9e6cf86889fc9b049a7195da496a28\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:40:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-re
lease\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6ffd06bbdeee7c23dde48a03a975b5a86b258d8f95281abd78c8ce459155bb3f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6ffd06bbdeee7c23dde48a03a975b5a86b258d8f95281abd78c8ce459155bb3f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:40:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ff5dbca8ebeaee6449c5eadf548e32151cc59875e405a574c1f2e0677b40bb19\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ff5dbca8ebeaee6449c5eadf548e32151cc59875e405a574c1f2e0677b40bb19\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:40:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:00Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-sr572\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:30Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:30 crc kubenswrapper[4926]: I1122 10:40:30.750501 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:30 crc kubenswrapper[4926]: I1122 10:40:30.750536 4926 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:30 crc kubenswrapper[4926]: I1122 10:40:30.750552 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:30 crc kubenswrapper[4926]: I1122 10:40:30.750572 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:30 crc kubenswrapper[4926]: I1122 10:40:30.750588 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:30Z","lastTransitionTime":"2025-11-22T10:40:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:30 crc kubenswrapper[4926]: I1122 10:40:30.759188 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-jfbf4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c42b6f47-b1a4-4fee-8681-3b5288370323\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:14Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:14Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:14Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cddxh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cddxh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:14Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-jfbf4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:30Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:30 crc kubenswrapper[4926]: I1122 10:40:30.779251 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1f3bdd4c-e5c5-46f1-8f92-450d892ef2e2\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b68f6c08d1ac65e51054d798dcb1e6affbf5f114ea3848b544d3093168e44150\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://adceb29cfdba4f70773eb38f81b2291080a8a25e6d6ffe7fbbb037118e0ac29f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://29acb771444281287208f7c5aecfe78ed6e70fe2940be31d423c011878bee6c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c37c77ac89f0469ff68b3d5dffc2af7d7b6997b
8975abe244e302baf0f5bbacc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ed65ad0463b5aa72d13756891af20691bff705a94c67a55d45f71af2a88b01e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2f4c96d7281eda14ad3971ee27f9f0f401d12ba8b72e32b65bd9d4935182f980\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2f4c96d7281eda14ad3971ee27f9f0f401d12ba8b72e32b65bd9d4935182f980\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:39:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:39:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ebad6d17a869f5f58a7c43300ceace96025b58ca9621cb6bc23b45d69fe471b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ebad6d17a869f5f58a7c43300ceace96025b58ca9621cb6bc23b45d69fe471b4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:39:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:39:42Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://6075bb148352742b787b1e8df58fe76d06db89303a7c297d07446148532e92cb\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6075bb148352742b787b1e8df58fe76d06db89303a7c297d07446148532e92cb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:39:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:39:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:39:40Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:30Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:30 crc kubenswrapper[4926]: I1122 10:40:30.792622 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://20ee112074738980326d231b1680aa74c920ec797dbfe4752ebfdfb99695caeb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0a5e6c7d3f4dd4bbfdc58c50672c5187d32b136fb9e096c95e786193e1c773b9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36
cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:30Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:30 crc kubenswrapper[4926]: I1122 10:40:30.809441 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-z69nr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"25bc94bb-a5d1-431c-9847-2f6a02997e25\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://20393151c13ddc03e23c32092c70e09c99527d0dd7554c02fc9d2ef8f6e38ba4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://877601ea3a274df873ed3165ae9f9e407093527a55ca10bb9a1c6092312c1b58\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ecaafcef7f92f85be04341712725da108f6dc00c79d4fb72f683186ccaa7c7d7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f2e1c170bdf63c83df5049fbe46ef4de6e08838e961ed3c16904cb9572fdf52f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4dd9889bb8600e958757f70ba92e2e7742f9f4e8ce9bf9c11dc4a2fd4ba0aaa2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f647a7091a4d0acbf116e2583851b13ec4415004447202e892f2b24b7acf5e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f289e2c0a1949f60197796b92dd61a9192993575
c83f759f2030ee0558635eaa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c31c877de789dac072b3eff81eb5639254198dc2af1cc12ecc408c3272f8c2c5\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-22T10:40:11Z\\\",\\\"message\\\":\\\"to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:11Z is after 2025-08-24T17:21:41Z]\\\\nI1122 10:40:11.880425 6326 services_controller.go:473] Services do not match for network=default, existing lbs: []services.LB{services.LB{Name:\\\\\\\"Service_openshift-machine-api/machine-api-operator-webhook_TCP_cluster\\\\\\\", UUID:\\\\\\\"e4e4203e-87c7-4024-930a-5d6bdfe2bdde\\\\\\\", Protocol:\\\\\\\"tcp\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-machine-api/machine-api-operator-webhook\\\\\\\"}, Opts:services.LBOpts{Reject:false, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{}, Templates:services.TemplateMap{}, Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}, built lbs: []services.LB{services.LB{Name:\\\\\\\"Service_openshift-machine-api/machine-api-operator-webhook_TCP_cluster\\\\\\\", UUID:\\\\\\\"\\\\\\\", Protocol:\\\\\\\"TCP\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-machine-api/machine-api-operator-webhook\\\\\\\"}, Opts:services.LBOpts{Reject:true, EmptyLBEvents:false, AffinityTi\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:10Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f289e2c0a1949f60197796b92dd61a9192993575c83f759f2030ee0558635eaa\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-22T10:40:29Z\\\",\\\"message\\\":\\\"8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1122 10:40:29.517517 6569 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1122 10:40:29.517528 6569 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1122 10:40:29.517565 6569 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1122 10:40:29.517564 6569 handler.go:208] Removed *v1.Node event handler 2\\\\nI1122 10:40:29.517583 6569 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1122 10:40:29.517597 6569 handler.go:208] Removed *v1.Node event handler 7\\\\nI1122 10:40:29.517615 6569 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1122 10:40:29.517621 6569 handler.go:190] Sending *v1.Pod event handler 3 for removal\\\\nI1122 10:40:29.517625 6569 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI1122 10:40:29.517631 6569 handler.go:208] Removed *v1.EgressFirewall event handler 
9\\\\nI1122 10:40:29.517641 6569 handler.go:208] Removed *v1.Pod event handler 6\\\\nI1122 10:40:29.517653 6569 handler.go:208] Removed *v1.Pod event handler 3\\\\nI1122 10:40:29.517930 6569 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI1122 10:40:29.517976 6569 factory.go:656] Stopping watch factory\\\\nI1122 10:40:29.517989 6569 ovnkube.go:599] Stopped ovnkube\\\\nI1122 1\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://19bceeb84e2cce918b75507fb0f9d5e4365fcd0d30da4491b83ed54b3c2d369a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-acce
ss-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fde011e559f073f261a07a3e55c05d88d1b11a16b6858b74cb8ef79b97c266b1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fde011e559f073f261a07a3e55c05d88d1b11a16b6858b74cb8ef79b97c266b1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:00Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-z69nr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:30Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:30 crc kubenswrapper[4926]: I1122 10:40:30.825779 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-cqsd2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"40323bfa-0937-4581-a5f5-bbfe1de066eb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ed82455ff9e2721a5688857be775b66c293c17713ae1ce362da8eca9a0c52c24\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w5t6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2335c0518813a704a36b683366f4b63c953afc0187416234b6682532298f80ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w5t6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:12Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-cqsd2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:30Z is after 2025-08-24T17:21:41Z" Nov 22 
10:40:30 crc kubenswrapper[4926]: I1122 10:40:30.837245 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://136423f9eeb872be0b685dcfee2a81e5f4133ad09607dc53fdfa64d70e87ceab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:30Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:30 crc kubenswrapper[4926]: I1122 10:40:30.852579 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:30 crc kubenswrapper[4926]: I1122 10:40:30.852621 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:30 crc kubenswrapper[4926]: I1122 10:40:30.852635 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:30 crc kubenswrapper[4926]: I1122 10:40:30.852654 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:30 crc kubenswrapper[4926]: I1122 10:40:30.852667 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:30Z","lastTransitionTime":"2025-11-22T10:40:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:40:30 crc kubenswrapper[4926]: I1122 10:40:30.853420 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-c6w2q" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"36de2843-6491-4c54-b624-c4a3d328c164\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://13dff3bd18b5adc45fb92f8207a6726011752f430271dabbae9f2e94011f1b08\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-52vzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126
.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:00Z\\\"}}\" for pod \"openshift-multus\"/\"multus-c6w2q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:30Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:30 crc kubenswrapper[4926]: I1122 10:40:30.954992 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:30 crc kubenswrapper[4926]: I1122 10:40:30.955036 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:30 crc kubenswrapper[4926]: I1122 10:40:30.955053 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:30 crc kubenswrapper[4926]: I1122 10:40:30.955074 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:30 crc kubenswrapper[4926]: I1122 10:40:30.955092 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:30Z","lastTransitionTime":"2025-11-22T10:40:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:30 crc kubenswrapper[4926]: I1122 10:40:30.972381 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-z69nr_25bc94bb-a5d1-431c-9847-2f6a02997e25/ovnkube-controller/2.log" Nov 22 10:40:30 crc kubenswrapper[4926]: I1122 10:40:30.976335 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:30 crc kubenswrapper[4926]: I1122 10:40:30.976378 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:30 crc kubenswrapper[4926]: I1122 10:40:30.976395 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:30 crc kubenswrapper[4926]: I1122 10:40:30.976415 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:30 crc kubenswrapper[4926]: I1122 10:40:30.976432 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:30Z","lastTransitionTime":"2025-11-22T10:40:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:40:30 crc kubenswrapper[4926]: I1122 10:40:30.977071 4926 scope.go:117] "RemoveContainer" containerID="f289e2c0a1949f60197796b92dd61a9192993575c83f759f2030ee0558635eaa" Nov 22 10:40:30 crc kubenswrapper[4926]: E1122 10:40:30.977455 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-z69nr_openshift-ovn-kubernetes(25bc94bb-a5d1-431c-9847-2f6a02997e25)\"" pod="openshift-ovn-kubernetes/ovnkube-node-z69nr" podUID="25bc94bb-a5d1-431c-9847-2f6a02997e25" Nov 22 10:40:30 crc kubenswrapper[4926]: I1122 10:40:30.995958 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-cqsd2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"40323bfa-0937-4581-a5f5-bbfe1de066eb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ed82455ff9e2721a5688857be775b66c293c17713ae1ce362da8eca9a0c52c24\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w5t6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2335c0518813a704a36b683366f4b63c953afc0187416234b6682532298f80ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\
\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w5t6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:12Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-cqsd2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:30Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:31 crc kubenswrapper[4926]: E1122 10:40:31.000597 4926 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404548Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865348Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:40:30Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:30Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:40:30Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:30Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:40:30Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:30Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:40:30Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:30Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"aeb560ef-1eb5-4732-98e3-250b723cbd1b\\\",\\\"systemUUID\\\":\\\"16dcb71b-1e1f-4d77-bb74-17aa213c9052\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:30Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:31 crc kubenswrapper[4926]: I1122 10:40:31.006314 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:31 crc kubenswrapper[4926]: I1122 10:40:31.006389 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 22 10:40:31 crc kubenswrapper[4926]: I1122 10:40:31.006407 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:31 crc kubenswrapper[4926]: I1122 10:40:31.006430 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:31 crc kubenswrapper[4926]: I1122 10:40:31.006446 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:31Z","lastTransitionTime":"2025-11-22T10:40:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:31 crc kubenswrapper[4926]: E1122 10:40:31.024735 4926 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404548Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865348Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:40:31Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:31Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:40:31Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:31Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:40:31Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:31Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:40:31Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:31Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"aeb560ef-1eb5-4732-98e3-250b723cbd1b\\\",\\\"systemUUID\\\":\\\"16dcb71b-1e1f-4d77-bb74-17aa213c9052\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:31Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:31 crc kubenswrapper[4926]: I1122 10:40:31.029217 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:31 crc kubenswrapper[4926]: I1122 10:40:31.029270 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 22 10:40:31 crc kubenswrapper[4926]: I1122 10:40:31.029288 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:31 crc kubenswrapper[4926]: I1122 10:40:31.029312 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:31 crc kubenswrapper[4926]: I1122 10:40:31.029328 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:31Z","lastTransitionTime":"2025-11-22T10:40:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:31 crc kubenswrapper[4926]: I1122 10:40:31.035667 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1f3bdd4c-e5c5-46f1-8f92-450d892ef2e2\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b68f6c08d1ac65e51054d798dcb1e6affbf5f114ea3848b544d3093168e44150\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://adceb29cfdba4f70773eb38f81b2291080a8a25e6d6ffe7fbbb037118e0ac29f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resourc
es\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://29acb771444281287208f7c5aecfe78ed6e70fe2940be31d423c011878bee6c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c37c77ac89f0469ff68b3d5dffc2af7d7b6997b8975abe244e302baf0f5bbacc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ed65ad0463b5aa72d13756891af20691bff705a94c67a55d45f71af2a88b01e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2f4c96d7281eda14ad3971ee27f9f0f401d12ba8b72e32b65bd9d4935182f980\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2f4c96d7281eda14ad3971ee27f9f0f401d12ba8b72e32b65bd9d4935182f980\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:39:41Z\\\",\\\"reason\\\":\\\"Complet
ed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:39:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ebad6d17a869f5f58a7c43300ceace96025b58ca9621cb6bc23b45d69fe471b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ebad6d17a869f5f58a7c43300ceace96025b58ca9621cb6bc23b45d69fe471b4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:39:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:39:42Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://6075bb148352742b787b1e8df58fe76d06db89303a7c297d07446148532e92cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6075bb148352742b787b1e8df58fe76d06db89303a7c297d07446148532e92cb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:39:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:39:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:39:40Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:31Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:31 crc kubenswrapper[4926]: E1122 10:40:31.047065 4926 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404548Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865348Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:40:31Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:31Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory 
available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:40:31Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:31Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:40:31Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:31Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:40:31Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:31Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\
"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":45063
7738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"aeb560ef-1eb5-4732-98e3-250b723cbd1b\\\",\\\"systemUUID\\\":\\\"16dcb71b-1e1f-4d77-bb74-17aa213c9052\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:31Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:31 crc kubenswrapper[4926]: I1122 10:40:31.053481 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:31 crc kubenswrapper[4926]: I1122 10:40:31.053607 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:31 crc kubenswrapper[4926]: I1122 10:40:31.053728 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:31 crc kubenswrapper[4926]: I1122 10:40:31.053764 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:31 crc kubenswrapper[4926]: I1122 10:40:31.053781 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:31Z","lastTransitionTime":"2025-11-22T10:40:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:40:31 crc kubenswrapper[4926]: I1122 10:40:31.055475 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://20ee112074738980326d231b1680aa74c920ec797dbfe4752ebfdfb99695caeb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0a5e6c7d3f4dd4bbfdc58c50672c5187d32b136fb9e096c95e786193e1c773b9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:31Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:31 crc kubenswrapper[4926]: E1122 10:40:31.068312 4926 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status 
\"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404548Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865348Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:40:31Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:31Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:40:31Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:31Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:40:31Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:31Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:40:31Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:31Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae
669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-r
elease-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-
art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"aeb560ef-1eb5-4732-98e3-250b723cbd1b\\\",\\\"systemUUID\\\":\\\"16dcb71b-1e1f-4d77-bb74-17aa213c9052\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:31Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:31 crc kubenswrapper[4926]: I1122 10:40:31.072122 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:31 crc kubenswrapper[4926]: I1122 10:40:31.072152 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:31 crc kubenswrapper[4926]: I1122 10:40:31.072183 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:31 crc kubenswrapper[4926]: I1122 10:40:31.072199 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:31 crc kubenswrapper[4926]: I1122 10:40:31.072212 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:31Z","lastTransitionTime":"2025-11-22T10:40:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:40:31 crc kubenswrapper[4926]: I1122 10:40:31.077410 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-z69nr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"25bc94bb-a5d1-431c-9847-2f6a02997e25\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://20393151c13ddc03e23c32092c70e09c99527d0dd7554c02fc9d2ef8f6e38ba4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://877601ea3a274df873ed3165ae9f9e407093527a55ca10bb9a1c6092312c1b58\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\
":\\\"cri-o://ecaafcef7f92f85be04341712725da108f6dc00c79d4fb72f683186ccaa7c7d7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f2e1c170bdf63c83df5049fbe46ef4de6e08838e961ed3c16904cb9572fdf52f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4dd9889bb8600e958757f70ba92e2e7742f9f4e8ce9bf9c11dc4a2fd4ba0aaa2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f647a7091a4d0acbf116e2583851b13ec4415004447202e892f2b24b7acf5e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.i
o/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f289e2c0a1949f60197796b92dd61a9192993575c83f759f2030ee0558635eaa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f289e2c0a1949f60197796b92dd61a9192993575c83f759f2030ee0558635eaa\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-22T10:40:29Z\\\",\\\"message\\\":\\\"8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1122 10:40:29.517517 6569 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1122 10:40:29.517528 6569 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1122 10:40:29.517565 6569 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1122 10:40:29.517564 6569 handler.go:208] Removed *v1.Node event handler 2\\\\nI1122 10:40:29.517583 6569 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1122 10:40:29.517597 6569 handler.go:208] Removed *v1.Node event handler 7\\\\nI1122 10:40:29.517615 6569 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1122 10:40:29.517621 6569 handler.go:190] Sending *v1.Pod event handler 3 for removal\\\\nI1122 10:40:29.517625 6569 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI1122 10:40:29.517631 6569 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1122 10:40:29.517641 6569 handler.go:208] Removed *v1.Pod event handler 6\\\\nI1122 10:40:29.517653 6569 handler.go:208] Removed *v1.Pod event handler 3\\\\nI1122 10:40:29.517930 6569 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI1122 10:40:29.517976 6569 factory.go:656] Stopping watch factory\\\\nI1122 10:40:29.517989 6569 ovnkube.go:599] Stopped ovnkube\\\\nI1122 1\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:28Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-z69nr_openshift-ovn-kubernetes(25bc94bb-a5d1-431c-9847-2f6a02997e25)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://19bceeb84e2cce918b75507fb0f9d5e4365fcd0d30da4491b83ed54b3c2d369a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fde011e559f073f261a07a3e55c05d88d1b11a16b6858b74cb8ef79b97c266b1\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fde011e559f073f261a07a3e55c05d88d1b11a16b6858b74cb8ef79b97c266b1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:00Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-z69nr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:31Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:31 crc kubenswrapper[4926]: E1122 10:40:31.085995 4926 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404548Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865348Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:40:31Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:31Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:40:31Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:31Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:40:31Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:31Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:40:31Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:31Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"aeb560ef-1eb5-4732-98e3-250b723cbd1b\\\",\\\"systemUUID\\\":\\\"16dcb71b-1e1f-4d77-bb74-17aa213c9052\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:31Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:31 crc kubenswrapper[4926]: E1122 10:40:31.086353 4926 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 22 10:40:31 crc kubenswrapper[4926]: I1122 10:40:31.087774 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Nov 22 10:40:31 crc kubenswrapper[4926]: I1122 10:40:31.087810 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:31 crc kubenswrapper[4926]: I1122 10:40:31.087821 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:31 crc kubenswrapper[4926]: I1122 10:40:31.087835 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:31 crc kubenswrapper[4926]: I1122 10:40:31.087844 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:31Z","lastTransitionTime":"2025-11-22T10:40:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:31 crc kubenswrapper[4926]: I1122 10:40:31.090312 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://136423f9eeb872be0b685dcfee2a81e5f4133ad09607dc53fdfa64d70e87ceab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:31Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:31 crc kubenswrapper[4926]: I1122 10:40:31.103625 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-c6w2q" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"36de2843-6491-4c54-b624-c4a3d328c164\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://13dff3bd18b5adc45fb92f8207a6726011752f430271dabbae9f2e94011f1b08\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-52vzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:00Z\\\"}}\" for pod \"openshift-multus\"/\"multus-c6w2q\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:31Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:31 crc kubenswrapper[4926]: I1122 10:40:31.116783 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:31Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:31 crc kubenswrapper[4926]: I1122 10:40:31.126151 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-4sppr" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f66b576d-83a6-4919-a918-7b075f35881e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a4a164ad2484a9c0b09c8e6b174448dedfe84a017ada69b19429d90621540080\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jwtrf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:01Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-4sppr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:31Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:31 crc kubenswrapper[4926]: I1122 10:40:31.136089 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"19e2ae79-41cf-4653-aa92-cc410145852b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5b4679473c2c11fecbd8496e1cbeb15b3a2e3fff5e5b5f127e0d1f1d7b3e5b70\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8066b411b165bb6f3aa9fac7c4f7d5687e58fba56ff2afd14c7a351ba3877483\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://96a716433f83f8041fc5f29383937a1b49d73701286739632a5c07a7e4a42077\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ff433d4efb9f77a47f79a39aa609b09a40a642f554e19b8fce0c84ad96a5e34\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:39:40Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:31Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:31 crc kubenswrapper[4926]: I1122 10:40:31.144084 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-wqf9b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"385427b5-fb45-42dd-8ff8-a4c7cdad6157\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://06207f52a9095cf1830a6ad91952c7efa774aa15a2fca873dc09b6b995e26401\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4rrk8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\"
:[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:00Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-wqf9b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:31Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:31 crc kubenswrapper[4926]: I1122 10:40:31.155417 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2bff8ee1644ff7cd9bee279e5c641b5d9c55e0546fbc7d1f2330e4e86aee0c37\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:31Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:31 crc kubenswrapper[4926]: I1122 10:40:31.166351 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:31Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:31 crc kubenswrapper[4926]: I1122 10:40:31.181374 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-sr572" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"fa95257d-7464-4038-b2f3-aa795e4ac425\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b2dd6b2a2fe640f28f973df40cf7a31e3cba962a0d2aec796c6fcfa3892adce1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0a7d49a66e96b6f535cae7cd657acf64164c67b52df055b133183ef25644863d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0a7d49a66e96b6f535cae7cd657acf64164c67b52df055b133183ef25644863d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://53833aaa04a836cea1e6539fd089b91d92bec1e9c1037d036a09fa83a95b45da\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://53833aaa04a836cea1e6539fd089b91d92bec1e9c1037d036a09fa83a95b45da\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://80eeb7fb332eda9b9472092de548160297c4abdf76a929904fcf3321e992c230\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://80eeb7fb332eda9b9472092de548160297c4abdf76a929904fcf3321e992c230\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:40:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6293e4f193241ddbe2b4e93ceee670330e9e6cf86889fc9b049a7195da496a28\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6293e4f193241ddbe2b4e93ceee670330e9e6cf86889fc9b049a7195da496a28\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:40:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6ffd06bbdeee7c23dde48a03a975b5a86b258d8f95281abd78c8ce459155bb3f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6ffd06bbdeee7c23dde48a03a975b5a86b258d8f95281abd78c8ce459155bb3f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:40:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ff5dbca8ebeaee6449c5eadf548e32151cc59875e405a574c1f2e0677b40bb19\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ff5dbca8ebeaee6449c5eadf548e32151cc59875e405a574c1f2e0677b40bb19\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:40:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:00Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-sr572\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:31Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:31 crc kubenswrapper[4926]: I1122 10:40:31.190518 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:31 crc kubenswrapper[4926]: I1122 10:40:31.190556 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:31 crc 
kubenswrapper[4926]: I1122 10:40:31.190567 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:31 crc kubenswrapper[4926]: I1122 10:40:31.190584 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:31 crc kubenswrapper[4926]: I1122 10:40:31.190596 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:31Z","lastTransitionTime":"2025-11-22T10:40:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:31 crc kubenswrapper[4926]: I1122 10:40:31.192619 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-jfbf4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c42b6f47-b1a4-4fee-8681-3b5288370323\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:14Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:14Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:14Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cddxh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cddxh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:14Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-jfbf4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:31Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:31 crc kubenswrapper[4926]: I1122 10:40:31.209868 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8bc26a84-5939-4708-97f7-36fb5b1d1f27\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:40Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:40Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bdc617bc3d316106b3e66df8521ae9165a90df081b92c4a6c167895006a667a4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8bdb9d01c359b36d5ea1517197ab1db0746267b371b5aaf6ae9af7157af6f76\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f92968d6a164ebcee515ead5d8704ab04430b65f29cec21c10cecf2b99079737\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://99cddd2a619c5ea76699afd981ea6b0ccab8df382e64f7f9b5399ac9091da32a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e4845e8b99f2def354e4a055d407495a578db4994aef6fb1c70b91760bbf4e2a\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"message\\\":\\\"le observer\\\\nW1122 10:40:01.221686 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1122 10:40:01.221835 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1122 10:40:01.223195 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3742639324/tls.crt::/tmp/serving-cert-3742639324/tls.key\\\\\\\"\\\\nI1122 10:40:01.845350 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1122 10:40:01.848213 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1122 10:40:01.848231 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1122 10:40:01.848251 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1122 10:40:01.848258 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1122 10:40:01.855383 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1122 10:40:01.855412 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1122 10:40:01.855418 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1122 10:40:01.855423 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nI1122 10:40:01.855441 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1122 10:40:01.855456 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1122 10:40:01.855461 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1122 10:40:01.855464 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1122 10:40:01.857125 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T10:39:55Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://59001d5969924d417da69f7b385e8ee904bb5289914c6f4a58156a80f2553abd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:43Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e4fdad471fe7d7332fa8c1fc1e4779b934b140f9c808e0af44f05808c56b3e42\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e4fdad471fe7d7332fa8c1fc1e4779b934b140f9c808e0af44f05808c56b3e42\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:39:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:39:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:39:40Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:31Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:31 crc kubenswrapper[4926]: I1122 10:40:31.220286 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:31Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:31 crc kubenswrapper[4926]: I1122 10:40:31.229085 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d4977b14-85c3-4141-9b15-1768f09e8d27\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7f4461b3d09c647d71476b2c089ab47e93fe0e0efb27f8179e476ff3667447f6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5nwn7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fb54559825bae40317f4dbc23c5b4238c4681f9845053656d59b2d2b3e504e1a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5nwn7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:00Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-xr9nd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:31Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:31 crc kubenswrapper[4926]: I1122 10:40:31.293506 4926 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:31 crc kubenswrapper[4926]: I1122 10:40:31.293560 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:31 crc kubenswrapper[4926]: I1122 10:40:31.293571 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:31 crc kubenswrapper[4926]: I1122 10:40:31.293585 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:31 crc kubenswrapper[4926]: I1122 10:40:31.293595 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:31Z","lastTransitionTime":"2025-11-22T10:40:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:31 crc kubenswrapper[4926]: I1122 10:40:31.396357 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:31 crc kubenswrapper[4926]: I1122 10:40:31.396420 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:31 crc kubenswrapper[4926]: I1122 10:40:31.396442 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:31 crc kubenswrapper[4926]: I1122 10:40:31.396509 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:31 crc kubenswrapper[4926]: I1122 10:40:31.396532 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:31Z","lastTransitionTime":"2025-11-22T10:40:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:31 crc kubenswrapper[4926]: I1122 10:40:31.499386 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:31 crc kubenswrapper[4926]: I1122 10:40:31.499445 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:31 crc kubenswrapper[4926]: I1122 10:40:31.499463 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:31 crc kubenswrapper[4926]: I1122 10:40:31.499485 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:31 crc kubenswrapper[4926]: I1122 10:40:31.499504 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:31Z","lastTransitionTime":"2025-11-22T10:40:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:40:31 crc kubenswrapper[4926]: I1122 10:40:31.581977 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 22 10:40:31 crc kubenswrapper[4926]: E1122 10:40:31.582115 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 22 10:40:31 crc kubenswrapper[4926]: I1122 10:40:31.602295 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:31 crc kubenswrapper[4926]: I1122 10:40:31.602337 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:31 crc kubenswrapper[4926]: I1122 10:40:31.602349 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:31 crc kubenswrapper[4926]: I1122 10:40:31.602368 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:31 crc kubenswrapper[4926]: I1122 10:40:31.602382 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:31Z","lastTransitionTime":"2025-11-22T10:40:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:31 crc kubenswrapper[4926]: I1122 10:40:31.704616 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:31 crc kubenswrapper[4926]: I1122 10:40:31.704678 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:31 crc kubenswrapper[4926]: I1122 10:40:31.704701 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:31 crc kubenswrapper[4926]: I1122 10:40:31.704731 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:31 crc kubenswrapper[4926]: I1122 10:40:31.704754 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:31Z","lastTransitionTime":"2025-11-22T10:40:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:40:31 crc kubenswrapper[4926]: I1122 10:40:31.806824 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:31 crc kubenswrapper[4926]: I1122 10:40:31.806931 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:31 crc kubenswrapper[4926]: I1122 10:40:31.806952 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:31 crc kubenswrapper[4926]: I1122 10:40:31.806977 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:31 crc kubenswrapper[4926]: I1122 10:40:31.806995 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:31Z","lastTransitionTime":"2025-11-22T10:40:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:31 crc kubenswrapper[4926]: I1122 10:40:31.909708 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:31 crc kubenswrapper[4926]: I1122 10:40:31.909730 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:31 crc kubenswrapper[4926]: I1122 10:40:31.909738 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:31 crc kubenswrapper[4926]: I1122 10:40:31.909750 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:31 crc kubenswrapper[4926]: I1122 10:40:31.909759 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:31Z","lastTransitionTime":"2025-11-22T10:40:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:32 crc kubenswrapper[4926]: I1122 10:40:32.011962 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:32 crc kubenswrapper[4926]: I1122 10:40:32.012024 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:32 crc kubenswrapper[4926]: I1122 10:40:32.012039 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:32 crc kubenswrapper[4926]: I1122 10:40:32.012088 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:32 crc kubenswrapper[4926]: I1122 10:40:32.012102 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:32Z","lastTransitionTime":"2025-11-22T10:40:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Nov 22 10:40:32 crc kubenswrapper[4926]: I1122 10:40:32.115161 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 10:40:32 crc kubenswrapper[4926]: I1122 10:40:32.115193 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 10:40:32 crc kubenswrapper[4926]: I1122 10:40:32.115204 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 10:40:32 crc kubenswrapper[4926]: I1122 10:40:32.115220 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 10:40:32 crc kubenswrapper[4926]: I1122 10:40:32.115231 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:32Z","lastTransitionTime":"2025-11-22T10:40:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 10:40:32 crc kubenswrapper[4926]: I1122 10:40:32.218067 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 10:40:32 crc kubenswrapper[4926]: I1122 10:40:32.218146 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 10:40:32 crc kubenswrapper[4926]: I1122 10:40:32.218170 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 10:40:32 crc kubenswrapper[4926]: I1122 10:40:32.218204 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 10:40:32 crc kubenswrapper[4926]: I1122 10:40:32.218224 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:32Z","lastTransitionTime":"2025-11-22T10:40:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 10:40:32 crc kubenswrapper[4926]: I1122 10:40:32.228355 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 22 10:40:32 crc kubenswrapper[4926]: I1122 10:40:32.228467 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 22 10:40:32 crc kubenswrapper[4926]: I1122 10:40:32.228514 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 22 10:40:32 crc kubenswrapper[4926]: E1122 10:40:32.228586 4926 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered
Nov 22 10:40:32 crc kubenswrapper[4926]: E1122 10:40:32.228644 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 10:41:04.228611994 +0000 UTC m=+84.530217321 (durationBeforeRetry 32s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 22 10:40:32 crc kubenswrapper[4926]: E1122 10:40:32.228671 4926 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered
Nov 22 10:40:32 crc kubenswrapper[4926]: E1122 10:40:32.228684 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-22 10:41:04.228668496 +0000 UTC m=+84.530273823 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered
Nov 22 10:40:32 crc kubenswrapper[4926]: E1122 10:40:32.228736 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-22 10:41:04.228718277 +0000 UTC m=+84.530323574 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered
Nov 22 10:40:32 crc kubenswrapper[4926]: I1122 10:40:32.321281 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 10:40:32 crc kubenswrapper[4926]: I1122 10:40:32.321329 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 10:40:32 crc kubenswrapper[4926]: I1122 10:40:32.321345 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 10:40:32 crc kubenswrapper[4926]: I1122 10:40:32.321368 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 10:40:32 crc kubenswrapper[4926]: I1122 10:40:32.321385 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:32Z","lastTransitionTime":"2025-11-22T10:40:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 10:40:32 crc kubenswrapper[4926]: I1122 10:40:32.329072 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 22 10:40:32 crc kubenswrapper[4926]: I1122 10:40:32.329195 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 22 10:40:32 crc kubenswrapper[4926]: E1122 10:40:32.329281 4926 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered
Nov 22 10:40:32 crc kubenswrapper[4926]: E1122 10:40:32.329317 4926 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered
Nov 22 10:40:32 crc kubenswrapper[4926]: E1122 10:40:32.329336 4926 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Nov 22 10:40:32 crc kubenswrapper[4926]: E1122 10:40:32.329371 4926 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered
Nov 22 10:40:32 crc kubenswrapper[4926]: E1122 10:40:32.329395 4926 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered
Nov 22 10:40:32 crc kubenswrapper[4926]: E1122 10:40:32.329413 4926 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Nov 22 10:40:32 crc kubenswrapper[4926]: E1122 10:40:32.329421 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-22 10:41:04.329395262 +0000 UTC m=+84.631000589 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Nov 22 10:40:32 crc kubenswrapper[4926]: E1122 10:40:32.329488 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-22 10:41:04.329454674 +0000 UTC m=+84.631059991 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Nov 22 10:40:32 crc kubenswrapper[4926]: I1122 10:40:32.425504 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 10:40:32 crc kubenswrapper[4926]: I1122 10:40:32.425573 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 10:40:32 crc kubenswrapper[4926]: I1122 10:40:32.425596 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 10:40:32 crc kubenswrapper[4926]: I1122 10:40:32.425627 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 10:40:32 crc kubenswrapper[4926]: I1122 10:40:32.425653 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:32Z","lastTransitionTime":"2025-11-22T10:40:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 10:40:32 crc kubenswrapper[4926]: I1122 10:40:32.528869 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 10:40:32 crc kubenswrapper[4926]: I1122 10:40:32.528958 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 10:40:32 crc kubenswrapper[4926]: I1122 10:40:32.528979 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 10:40:32 crc kubenswrapper[4926]: I1122 10:40:32.529002 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 10:40:32 crc kubenswrapper[4926]: I1122 10:40:32.529019 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:32Z","lastTransitionTime":"2025-11-22T10:40:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 10:40:32 crc kubenswrapper[4926]: I1122 10:40:32.581940 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-jfbf4"
Nov 22 10:40:32 crc kubenswrapper[4926]: I1122 10:40:32.582032 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 22 10:40:32 crc kubenswrapper[4926]: I1122 10:40:32.582169 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 22 10:40:32 crc kubenswrapper[4926]: E1122 10:40:32.582167 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-jfbf4" podUID="c42b6f47-b1a4-4fee-8681-3b5288370323"
Nov 22 10:40:32 crc kubenswrapper[4926]: E1122 10:40:32.582315 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 22 10:40:32 crc kubenswrapper[4926]: E1122 10:40:32.582404 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 22 10:40:32 crc kubenswrapper[4926]: I1122 10:40:32.632373 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 10:40:32 crc kubenswrapper[4926]: I1122 10:40:32.632464 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 10:40:32 crc kubenswrapper[4926]: I1122 10:40:32.632499 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 10:40:32 crc kubenswrapper[4926]: I1122 10:40:32.632536 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 10:40:32 crc kubenswrapper[4926]: I1122 10:40:32.632559 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:32Z","lastTransitionTime":"2025-11-22T10:40:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 10:40:32 crc kubenswrapper[4926]: I1122 10:40:32.735233 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 10:40:32 crc kubenswrapper[4926]: I1122 10:40:32.735319 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 10:40:32 crc kubenswrapper[4926]: I1122 10:40:32.735340 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 10:40:32 crc kubenswrapper[4926]: I1122 10:40:32.735365 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 10:40:32 crc kubenswrapper[4926]: I1122 10:40:32.735384 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:32Z","lastTransitionTime":"2025-11-22T10:40:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 10:40:32 crc kubenswrapper[4926]: I1122 10:40:32.838734 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 10:40:32 crc kubenswrapper[4926]: I1122 10:40:32.838772 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 10:40:32 crc kubenswrapper[4926]: I1122 10:40:32.838784 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 10:40:32 crc kubenswrapper[4926]: I1122 10:40:32.838801 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 10:40:32 crc kubenswrapper[4926]: I1122 10:40:32.838812 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:32Z","lastTransitionTime":"2025-11-22T10:40:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 10:40:32 crc kubenswrapper[4926]: I1122 10:40:32.941646 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 10:40:32 crc kubenswrapper[4926]: I1122 10:40:32.941723 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 10:40:32 crc kubenswrapper[4926]: I1122 10:40:32.941739 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 10:40:32 crc kubenswrapper[4926]: I1122 10:40:32.941769 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 10:40:32 crc kubenswrapper[4926]: I1122 10:40:32.941789 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:32Z","lastTransitionTime":"2025-11-22T10:40:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 10:40:33 crc kubenswrapper[4926]: I1122 10:40:33.044832 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 10:40:33 crc kubenswrapper[4926]: I1122 10:40:33.044946 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 10:40:33 crc kubenswrapper[4926]: I1122 10:40:33.044967 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 10:40:33 crc kubenswrapper[4926]: I1122 10:40:33.044988 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 10:40:33 crc kubenswrapper[4926]: I1122 10:40:33.045004 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:33Z","lastTransitionTime":"2025-11-22T10:40:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 10:40:33 crc kubenswrapper[4926]: I1122 10:40:33.147664 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 10:40:33 crc kubenswrapper[4926]: I1122 10:40:33.147713 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 10:40:33 crc kubenswrapper[4926]: I1122 10:40:33.147727 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 10:40:33 crc kubenswrapper[4926]: I1122 10:40:33.147749 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 10:40:33 crc kubenswrapper[4926]: I1122 10:40:33.147764 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:33Z","lastTransitionTime":"2025-11-22T10:40:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 10:40:33 crc kubenswrapper[4926]: I1122 10:40:33.252882 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 10:40:33 crc kubenswrapper[4926]: I1122 10:40:33.252995 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 10:40:33 crc kubenswrapper[4926]: I1122 10:40:33.253010 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 10:40:33 crc kubenswrapper[4926]: I1122 10:40:33.253053 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 10:40:33 crc kubenswrapper[4926]: I1122 10:40:33.253067 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:33Z","lastTransitionTime":"2025-11-22T10:40:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 10:40:33 crc kubenswrapper[4926]: I1122 10:40:33.355223 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 10:40:33 crc kubenswrapper[4926]: I1122 10:40:33.355339 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 10:40:33 crc kubenswrapper[4926]: I1122 10:40:33.355353 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 10:40:33 crc kubenswrapper[4926]: I1122 10:40:33.355375 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 10:40:33 crc kubenswrapper[4926]: I1122 10:40:33.355385 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:33Z","lastTransitionTime":"2025-11-22T10:40:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 10:40:33 crc kubenswrapper[4926]: I1122 10:40:33.466729 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 10:40:33 crc kubenswrapper[4926]: I1122 10:40:33.466780 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 10:40:33 crc kubenswrapper[4926]: I1122 10:40:33.466795 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 10:40:33 crc kubenswrapper[4926]: I1122 10:40:33.466815 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 10:40:33 crc kubenswrapper[4926]: I1122 10:40:33.466828 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:33Z","lastTransitionTime":"2025-11-22T10:40:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 10:40:33 crc kubenswrapper[4926]: I1122 10:40:33.570203 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 10:40:33 crc kubenswrapper[4926]: I1122 10:40:33.570271 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 10:40:33 crc kubenswrapper[4926]: I1122 10:40:33.570294 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 10:40:33 crc kubenswrapper[4926]: I1122 10:40:33.570324 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 10:40:33 crc kubenswrapper[4926]: I1122 10:40:33.570346 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:33Z","lastTransitionTime":"2025-11-22T10:40:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 10:40:33 crc kubenswrapper[4926]: I1122 10:40:33.581604 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 22 10:40:33 crc kubenswrapper[4926]: E1122 10:40:33.581824 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 22 10:40:33 crc kubenswrapper[4926]: I1122 10:40:33.672364 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 10:40:33 crc kubenswrapper[4926]: I1122 10:40:33.672418 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 10:40:33 crc kubenswrapper[4926]: I1122 10:40:33.672432 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 10:40:33 crc kubenswrapper[4926]: I1122 10:40:33.672449 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 10:40:33 crc kubenswrapper[4926]: I1122 10:40:33.672461 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:33Z","lastTransitionTime":"2025-11-22T10:40:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 10:40:33 crc kubenswrapper[4926]: I1122 10:40:33.775033 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 10:40:33 crc kubenswrapper[4926]: I1122 10:40:33.775272 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 10:40:33 crc kubenswrapper[4926]: I1122 10:40:33.775376 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 10:40:33 crc kubenswrapper[4926]: I1122 10:40:33.775448 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 10:40:33 crc kubenswrapper[4926]: I1122 10:40:33.775510 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:33Z","lastTransitionTime":"2025-11-22T10:40:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 10:40:33 crc kubenswrapper[4926]: I1122 10:40:33.878031 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 10:40:33 crc kubenswrapper[4926]: I1122 10:40:33.878074 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 10:40:33 crc kubenswrapper[4926]: I1122 10:40:33.878086 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 10:40:33 crc kubenswrapper[4926]: I1122 10:40:33.878103 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 10:40:33 crc kubenswrapper[4926]: I1122 10:40:33.878115 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:33Z","lastTransitionTime":"2025-11-22T10:40:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 10:40:33 crc kubenswrapper[4926]: I1122 10:40:33.980001 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 10:40:33 crc kubenswrapper[4926]: I1122 10:40:33.980066 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 10:40:33 crc kubenswrapper[4926]: I1122 10:40:33.980090 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 10:40:33 crc kubenswrapper[4926]: I1122 10:40:33.980115 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 10:40:33 crc kubenswrapper[4926]: I1122 10:40:33.980133 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:33Z","lastTransitionTime":"2025-11-22T10:40:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 10:40:34 crc kubenswrapper[4926]: I1122 10:40:34.083043 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 10:40:34 crc kubenswrapper[4926]: I1122 10:40:34.083341 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 10:40:34 crc kubenswrapper[4926]: I1122 10:40:34.083510 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 10:40:34 crc kubenswrapper[4926]: I1122 10:40:34.083658 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 10:40:34 crc kubenswrapper[4926]: I1122 10:40:34.083798 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:34Z","lastTransitionTime":"2025-11-22T10:40:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 10:40:34 crc kubenswrapper[4926]: I1122 10:40:34.186474 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 10:40:34 crc kubenswrapper[4926]: I1122 10:40:34.186552 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 10:40:34 crc kubenswrapper[4926]: I1122 10:40:34.186576 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 10:40:34 crc kubenswrapper[4926]: I1122 10:40:34.186606 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 10:40:34 crc kubenswrapper[4926]: I1122 10:40:34.186631 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:34Z","lastTransitionTime":"2025-11-22T10:40:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 10:40:34 crc kubenswrapper[4926]: I1122 10:40:34.290373 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 10:40:34 crc kubenswrapper[4926]: I1122 10:40:34.290437 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 10:40:34 crc kubenswrapper[4926]: I1122 10:40:34.290454 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 10:40:34 crc kubenswrapper[4926]: I1122 10:40:34.290479 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 10:40:34 crc kubenswrapper[4926]: I1122 10:40:34.290503 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:34Z","lastTransitionTime":"2025-11-22T10:40:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 10:40:34 crc kubenswrapper[4926]: I1122 10:40:34.392755 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 10:40:34 crc kubenswrapper[4926]: I1122 10:40:34.392822 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 10:40:34 crc kubenswrapper[4926]: I1122 10:40:34.392838 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 10:40:34 crc kubenswrapper[4926]: I1122 10:40:34.392856 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 10:40:34 crc kubenswrapper[4926]: I1122 10:40:34.392868 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:34Z","lastTransitionTime":"2025-11-22T10:40:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 10:40:34 crc kubenswrapper[4926]: I1122 10:40:34.495951 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 10:40:34 crc kubenswrapper[4926]: I1122 10:40:34.496362 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 10:40:34 crc kubenswrapper[4926]: I1122 10:40:34.496585 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 10:40:34 crc kubenswrapper[4926]: I1122 10:40:34.496818 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 10:40:34 crc kubenswrapper[4926]: I1122 10:40:34.497110 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:34Z","lastTransitionTime":"2025-11-22T10:40:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 10:40:34 crc kubenswrapper[4926]: I1122 10:40:34.531524 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 22 10:40:34 crc kubenswrapper[4926]: I1122 10:40:34.555494 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:34Z is after 2025-08-24T17:21:41Z"
Nov 22 10:40:34 crc kubenswrapper[4926]: I1122 10:40:34.573783 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-4sppr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f66b576d-83a6-4919-a918-7b075f35881e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a4a164ad2484a9c0b09c8e6b174448dedfe84a017ada69b19429d90621540080\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jwtrf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:01Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-4sppr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:34Z is after 2025-08-24T17:21:41Z"
Nov 22 10:40:34 crc kubenswrapper[4926]: I1122 10:40:34.582042 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 22 10:40:34 crc kubenswrapper[4926]: I1122 10:40:34.582091 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 22 10:40:34 crc kubenswrapper[4926]: E1122 10:40:34.582182 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 22 10:40:34 crc kubenswrapper[4926]: I1122 10:40:34.582048 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-jfbf4"
Nov 22 10:40:34 crc kubenswrapper[4926]: E1122 10:40:34.582374 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 22 10:40:34 crc kubenswrapper[4926]: E1122 10:40:34.582512 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-jfbf4" podUID="c42b6f47-b1a4-4fee-8681-3b5288370323"
Nov 22 10:40:34 crc kubenswrapper[4926]: I1122 10:40:34.595579 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"19e2ae79-41cf-4653-aa92-cc410145852b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5b4679473c2c11fecbd8496e1cbeb15b3a2e3fff5e5b5f127e0d1f1d7b3e5b70\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8066b411b165bb6f3aa9fac7c4f7d5687e58fba56ff2afd14c7a351ba3877483\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://96a716433f83f8041fc5f29383937a1b49d73701286739632a5c07a7e4a42077\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ff433d4efb9f77a47f79a39aa609b09a40a642f554e19b8fce0c84ad96a5e34\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:39:40Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:34Z is after 2025-08-24T17:21:41Z"
Nov 22 10:40:34 crc kubenswrapper[4926]: I1122 10:40:34.600065 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 10:40:34 crc kubenswrapper[4926]: I1122 10:40:34.600129 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 10:40:34 crc kubenswrapper[4926]: I1122 10:40:34.600146 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 10:40:34 crc kubenswrapper[4926]: I1122 10:40:34.600173 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 10:40:34 crc kubenswrapper[4926]: I1122 10:40:34.600191 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:34Z","lastTransitionTime":"2025-11-22T10:40:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 10:40:34 crc kubenswrapper[4926]: I1122 10:40:34.607754 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-wqf9b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"385427b5-fb45-42dd-8ff8-a4c7cdad6157\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://06207f52a9095cf1830a6ad91952c7efa774aa15a2fca873dc09b6b995e26401\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4rrk8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:00Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-wqf9b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:34Z is after 2025-08-24T17:21:41Z"
Nov 22 10:40:34 crc kubenswrapper[4926]: I1122 10:40:34.627474 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2bff8ee1644ff7cd9bee279e5c641b5d9c55e0546fbc7d1f2330e4e86aee0c37\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:34Z is after 2025-08-24T17:21:41Z"
Nov 22 10:40:34 crc kubenswrapper[4926]: I1122 10:40:34.641372 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:34Z is after 2025-08-24T17:21:41Z"
Nov 22 10:40:34 crc kubenswrapper[4926]: I1122 10:40:34.659158 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-sr572" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fa95257d-7464-4038-b2f3-aa795e4ac425\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b2dd6b2a2fe640f28f973df40cf7a31e3cba962a0d2aec796c6fcfa3892adce1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0a7d49a66e96b6f535cae7cd657acf64164c67b52df055b133183ef25644863d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0a7d49a66e96b6f535cae7cd657acf64164c67b52df055b133183ef25644863d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://53833aaa04a836cea1e6539fd089b91d92bec1e9c1037d036a09fa83a95b45da\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://53833aaa04a836cea1e6539fd089b91d92bec1e9c1037d036a09fa83a95b45da\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://80eeb7fb332eda9b9472092de548160297c4abdf76a929904fcf3321e992c230\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://80eeb7fb332eda9b9472092de548160297c4abdf76a929904fcf3321e992c230\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:40:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disable
d\\\"}]},{\\\"containerID\\\":\\\"cri-o://6293e4f193241ddbe2b4e93ceee670330e9e6cf86889fc9b049a7195da496a28\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6293e4f193241ddbe2b4e93ceee670330e9e6cf86889fc9b049a7195da496a28\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:40:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6ffd06bbdeee7c23dde48a03a975b5a86b258d8f95281abd78c8ce459155bb3f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6ffd06bbdeee7c23dde48a03a975b5a86b258d8f95281abd78c8ce459155bb3f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:40:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ff5dbca8ebeaee6449c5eadf548e32151cc59875e405a574c1f2e0677b40bb19\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ff5dbca8ebeaee6449c5eadf548e32151cc59875e405a574c1f2e0677b40bb19\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:40:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name
\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:00Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-sr572\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:34Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:34 crc kubenswrapper[4926]: I1122 10:40:34.671119 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-jfbf4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c42b6f47-b1a4-4fee-8681-3b5288370323\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:14Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:14Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:14Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cddxh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cddxh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:14Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-jfbf4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:34Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:34 crc kubenswrapper[4926]: I1122 10:40:34.690702 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8bc26a84-5939-4708-97f7-36fb5b1d1f27\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bdc617bc3d316106b3e66df8521ae9165a90df081b92c4a6c167895006a667a4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8bdb9d01c359b36d5ea1517197ab1db0746267b371b5aaf6ae9af7157af6f76\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f92968d6a164ebcee515ead5d8704ab04430b65f29cec21c10cecf2b99079737\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://99cddd2a619c5ea76699afd981ea6b0ccab8df382e64f7f9b5399ac9091da32a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e4845e8b99f2def354e4a055d407495a578db4994aef6fb1c70b91760bbf4e2a\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"message\\\":\\\"le observer\\\\nW1122 10:40:01.221686 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1122 10:40:01.221835 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1122 10:40:01.223195 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3742639324/tls.crt::/tmp/serving-cert-3742639324/tls.key\\\\\\\"\\\\nI1122 10:40:01.845350 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1122 10:40:01.848213 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1122 10:40:01.848231 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1122 10:40:01.848251 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1122 10:40:01.848258 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1122 10:40:01.855383 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1122 10:40:01.855412 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1122 10:40:01.855418 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1122 10:40:01.855423 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nI1122 10:40:01.855441 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1122 10:40:01.855456 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1122 10:40:01.855461 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1122 10:40:01.855464 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1122 10:40:01.857125 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T10:39:55Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://59001d5969924d417da69f7b385e8ee904bb5289914c6f4a58156a80f2553abd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:43Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e4fdad471fe7d7332fa8c1fc1e4779b934b140f9c808e0af44f05808c56b3e42\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e4fdad471fe7d7332fa8c1fc1e4779b934b140f9c808e0af44f05808c56b3e42\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:39:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:39:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:39:40Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:34Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:34 crc kubenswrapper[4926]: I1122 10:40:34.703273 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:34 crc kubenswrapper[4926]: I1122 10:40:34.703311 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:34 crc kubenswrapper[4926]: I1122 10:40:34.703321 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:34 crc kubenswrapper[4926]: I1122 10:40:34.703335 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:34 crc kubenswrapper[4926]: I1122 10:40:34.703344 4926 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:34Z","lastTransitionTime":"2025-11-22T10:40:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:34 crc kubenswrapper[4926]: I1122 10:40:34.708522 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:34Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:34 crc kubenswrapper[4926]: I1122 10:40:34.725114 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d4977b14-85c3-4141-9b15-1768f09e8d27\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7f4461b3d09c647d71476b2c089ab47e93fe0e0efb27f8179e476ff3667447f6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5nwn7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fb54559825bae40317f4dbc23c5b4238c4681f9845053656d59b2d2b3e504e1a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae3
4a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5nwn7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:00Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-xr9nd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:34Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:34 crc kubenswrapper[4926]: I1122 10:40:34.740816 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-cqsd2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"40323bfa-0937-4581-a5f5-bbfe1de066eb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ed82455ff9e2721a5688857be775b66c293c17713ae1ce362da8eca9a0c52c24\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w5t6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2335c0518813a704a36b683366f4b63c953afc0187416234b6682532298f80ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0
b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w5t6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:12Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-cqsd2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:34Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:34 crc kubenswrapper[4926]: I1122 10:40:34.770698 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1f3bdd4c-e5c5-46f1-8f92-450d892ef2e2\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b68f6c08d1ac65e51054d798dcb1e6affbf5f114ea3848b544d3093168e44150\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://adceb29cfdba4f70773eb38f81b2291080a8a25e6d6ffe7fbbb037118e0ac29f\\\",\\\"image\\\":\\\"quay.io/openshi
ft-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://29acb771444281287208f7c5aecfe78ed6e70fe2940be31d423c011878bee6c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c37c77ac89f0469ff68b3d5dffc2af7d7b6997b8975abe244e302baf0f5bbacc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ed65ad0463b5aa72d13756891af20691bff705a94c67a55d45f71af2a88b01e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2f4c96d7281eda14ad3971ee27f9f0f401d12ba8b72e32b65bd9d4935182f980\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb6
8e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2f4c96d7281eda14ad3971ee27f9f0f401d12ba8b72e32b65bd9d4935182f980\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:39:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:39:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ebad6d17a869f5f58a7c43300ceace96025b58ca9621cb6bc23b45d69fe471b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ebad6d17a869f5f58a7c43300ceace96025b58ca9621cb6bc23b45d69fe471b4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:39:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:39:42Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://6075bb148352742b787b1e8df58fe76d06db89303a7c297d07446148532e92cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6075bb148352742b787b1e8df58fe76d06db89303a7c297d07446148532e92cb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:39:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:39:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:39:40Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:34Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:34 crc kubenswrapper[4926]: I1122 10:40:34.789423 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://20ee112074738980326d231b1680aa74c920ec797dbfe4752ebfdfb99695caeb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0a5e6c7d3f4dd4bbfdc58c50672c5187d32b136fb9e096c95e786193e1c773b9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:34Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:34 crc kubenswrapper[4926]: I1122 10:40:34.806553 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:34 crc kubenswrapper[4926]: I1122 10:40:34.806605 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:34 crc kubenswrapper[4926]: I1122 10:40:34.806625 4926 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Nov 22 10:40:34 crc kubenswrapper[4926]: I1122 10:40:34.806651 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:34 crc kubenswrapper[4926]: I1122 10:40:34.806668 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:34Z","lastTransitionTime":"2025-11-22T10:40:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:34 crc kubenswrapper[4926]: I1122 10:40:34.820558 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-z69nr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"25bc94bb-a5d1-431c-9847-2f6a02997e25\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://20393151c13ddc03e23c32092c70e09c99527d0dd7554c02fc9d2ef8f6e38ba4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://877601ea3a274df873ed3165ae9f9e407093527a55ca10bb9a1c6092312c1b58\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ecaafcef7f92f85be04341712725da108f6dc00c79d4fb72f683186ccaa7c7d7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f2e1c170bdf63c83df5049fbe46ef4de6e08838e961ed3c16904cb9572fdf52f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4dd9889bb8600e958757f70ba92e2e7742f9f4e8ce9bf9c11dc4a2fd4ba0aaa2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f647a7091a4d0acbf116e2583851b13ec4415004447202e892f2b24b7acf5e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f289e2c0a1949f60197796b92dd61a9192993575
c83f759f2030ee0558635eaa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f289e2c0a1949f60197796b92dd61a9192993575c83f759f2030ee0558635eaa\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-22T10:40:29Z\\\",\\\"message\\\":\\\"8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1122 10:40:29.517517 6569 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1122 10:40:29.517528 6569 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1122 10:40:29.517565 6569 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1122 10:40:29.517564 6569 handler.go:208] Removed *v1.Node event handler 2\\\\nI1122 10:40:29.517583 6569 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1122 10:40:29.517597 6569 handler.go:208] Removed *v1.Node event handler 7\\\\nI1122 10:40:29.517615 6569 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1122 10:40:29.517621 6569 handler.go:190] Sending *v1.Pod event handler 3 for removal\\\\nI1122 10:40:29.517625 6569 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI1122 10:40:29.517631 6569 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1122 10:40:29.517641 6569 handler.go:208] Removed *v1.Pod event handler 6\\\\nI1122 10:40:29.517653 6569 handler.go:208] Removed *v1.Pod event handler 3\\\\nI1122 10:40:29.517930 6569 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI1122 10:40:29.517976 6569 factory.go:656] Stopping watch factory\\\\nI1122 10:40:29.517989 6569 ovnkube.go:599] Stopped ovnkube\\\\nI1122 1\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:28Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-z69nr_openshift-ovn-kubernetes(25bc94bb-a5d1-431c-9847-2f6a02997e25)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://19bceeb84e2cce918b75507fb0f9d5e4365fcd0d30da4491b83ed54b3c2d369a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fde011e559f073f261a07a3e55c05d88d1b11a16b6858b74cb8ef79b97c266b1\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fde011e559f073f261a07a3e55c05d88d1b11a16b6858b74cb8ef79b97c266b1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:00Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-z69nr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:34Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:34 crc kubenswrapper[4926]: I1122 10:40:34.834821 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://136423f9eeb872be0b685dcfee2a81e5f4133ad09607dc53fdfa64d70e87ceab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed 
to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:34Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:34 crc kubenswrapper[4926]: I1122 10:40:34.847285 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-c6w2q" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"36de2843-6491-4c54-b624-c4a3d328c164\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://13dff3bd18b5adc45fb92f8207a6726011752f430271dabbae9f2e94011f1b08\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-52vzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadO
nly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:00Z\\\"}}\" for pod \"openshift-multus\"/\"multus-c6w2q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:34Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:34 crc kubenswrapper[4926]: I1122 10:40:34.909543 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:34 crc kubenswrapper[4926]: I1122 10:40:34.909770 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:34 crc kubenswrapper[4926]: I1122 10:40:34.909823 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:34 crc kubenswrapper[4926]: I1122 10:40:34.909928 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:34 crc kubenswrapper[4926]: I1122 10:40:34.909958 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:34Z","lastTransitionTime":"2025-11-22T10:40:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:35 crc kubenswrapper[4926]: I1122 10:40:35.012674 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:35 crc kubenswrapper[4926]: I1122 10:40:35.012726 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:35 crc kubenswrapper[4926]: I1122 10:40:35.012781 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:35 crc kubenswrapper[4926]: I1122 10:40:35.012806 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:35 crc kubenswrapper[4926]: I1122 10:40:35.012822 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:35Z","lastTransitionTime":"2025-11-22T10:40:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:40:35 crc kubenswrapper[4926]: I1122 10:40:35.115556 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:35 crc kubenswrapper[4926]: I1122 10:40:35.115591 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:35 crc kubenswrapper[4926]: I1122 10:40:35.115600 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:35 crc kubenswrapper[4926]: I1122 10:40:35.115612 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:35 crc kubenswrapper[4926]: I1122 10:40:35.115621 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:35Z","lastTransitionTime":"2025-11-22T10:40:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:35 crc kubenswrapper[4926]: I1122 10:40:35.218456 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:35 crc kubenswrapper[4926]: I1122 10:40:35.218498 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:35 crc kubenswrapper[4926]: I1122 10:40:35.218510 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:35 crc kubenswrapper[4926]: I1122 10:40:35.218527 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:35 crc kubenswrapper[4926]: I1122 10:40:35.218540 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:35Z","lastTransitionTime":"2025-11-22T10:40:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:35 crc kubenswrapper[4926]: I1122 10:40:35.321685 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:35 crc kubenswrapper[4926]: I1122 10:40:35.321725 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:35 crc kubenswrapper[4926]: I1122 10:40:35.321739 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:35 crc kubenswrapper[4926]: I1122 10:40:35.321758 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:35 crc kubenswrapper[4926]: I1122 10:40:35.321773 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:35Z","lastTransitionTime":"2025-11-22T10:40:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:40:35 crc kubenswrapper[4926]: I1122 10:40:35.425117 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:35 crc kubenswrapper[4926]: I1122 10:40:35.425158 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:35 crc kubenswrapper[4926]: I1122 10:40:35.425171 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:35 crc kubenswrapper[4926]: I1122 10:40:35.425191 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:35 crc kubenswrapper[4926]: I1122 10:40:35.425205 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:35Z","lastTransitionTime":"2025-11-22T10:40:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:35 crc kubenswrapper[4926]: I1122 10:40:35.528464 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:35 crc kubenswrapper[4926]: I1122 10:40:35.528551 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:35 crc kubenswrapper[4926]: I1122 10:40:35.528577 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:35 crc kubenswrapper[4926]: I1122 10:40:35.528611 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:35 crc kubenswrapper[4926]: I1122 10:40:35.528634 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:35Z","lastTransitionTime":"2025-11-22T10:40:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:35 crc kubenswrapper[4926]: I1122 10:40:35.581765 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 22 10:40:35 crc kubenswrapper[4926]: E1122 10:40:35.581972 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 22 10:40:35 crc kubenswrapper[4926]: I1122 10:40:35.631510 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:35 crc kubenswrapper[4926]: I1122 10:40:35.631585 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:35 crc kubenswrapper[4926]: I1122 10:40:35.631603 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:35 crc kubenswrapper[4926]: I1122 10:40:35.631633 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:35 crc kubenswrapper[4926]: I1122 10:40:35.631651 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:35Z","lastTransitionTime":"2025-11-22T10:40:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:35 crc kubenswrapper[4926]: I1122 10:40:35.654137 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 22 10:40:35 crc kubenswrapper[4926]: I1122 10:40:35.674305 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-scheduler/openshift-kube-scheduler-crc"] Nov 22 10:40:35 crc kubenswrapper[4926]: I1122 10:40:35.676497 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"19e2ae79-41cf-4653-aa92-cc410145852b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5b4679473c2c11fecbd8496e1cbeb15b3a2e3fff5e5b5f127e0d1f1d7b3e5b70\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8066b411b165bb6f3aa9fac7c4f7d5687e58fba56ff2afd14c7a351ba3877483\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://96a716433f83f8041fc5f29383937a1b49d73701286739632a5c07a7e4a42077\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ff433d4efb9f77a47f79a39aa609b09a40a642f554e19b8fce0c84ad96a5e34\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:39:40Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:35Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:35 crc kubenswrapper[4926]: I1122 10:40:35.693883 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-wqf9b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"385427b5-fb45-42dd-8ff8-a4c7cdad6157\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://06207f52a9095cf1830a6ad91952c7efa774aa15a2fca873dc09b6b995e26401\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4rrk8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\"
:[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:00Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-wqf9b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:35Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:35 crc kubenswrapper[4926]: I1122 10:40:35.710780 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2bff8ee1644ff7cd9bee279e5c641b5d9c55e0546fbc7d1f2330e4e86aee0c37\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:35Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:35 crc kubenswrapper[4926]: I1122 10:40:35.730704 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:35Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:35 crc kubenswrapper[4926]: I1122 10:40:35.734754 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:35 crc kubenswrapper[4926]: I1122 10:40:35.734826 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:35 crc kubenswrapper[4926]: I1122 10:40:35.734862 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:35 crc kubenswrapper[4926]: I1122 10:40:35.734931 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:35 crc kubenswrapper[4926]: I1122 10:40:35.734959 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:35Z","lastTransitionTime":"2025-11-22T10:40:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:40:35 crc kubenswrapper[4926]: I1122 10:40:35.748650 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:35Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:35 crc kubenswrapper[4926]: I1122 10:40:35.764580 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-4sppr" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f66b576d-83a6-4919-a918-7b075f35881e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a4a164ad2484a9c0b09c8e6b174448dedfe84a017ada69b19429d90621540080\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jwtrf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:01Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-4sppr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:35Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:35 crc kubenswrapper[4926]: I1122 10:40:35.786343 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8bc26a84-5939-4708-97f7-36fb5b1d1f27\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bdc617bc3d316106b3e66df8521ae9165a90df081b92c4a6c167895006a667a4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8bdb9d01c359b36d5ea1517197ab1db0746267b371b5aaf6ae9af7157af6f76\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f92968d6a164ebcee515ead5d8704ab04430b65f29cec21c10cecf2b99079737\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://99cddd2a619c5ea76699afd981ea6b0ccab8df382e64f7f9b5399ac9091da32a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e4845e8b99f2def354e4a055d407495a578db4994aef6fb1c70b91760bbf4e2a\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"message\\\":\\\"le observer\\\\nW1122 10:40:01.221686 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1122 10:40:01.221835 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1122 10:40:01.223195 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3742639324/tls.crt::/tmp/serving-cert-3742639324/tls.key\\\\\\\"\\\\nI1122 10:40:01.845350 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1122 10:40:01.848213 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1122 10:40:01.848231 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1122 10:40:01.848251 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1122 10:40:01.848258 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1122 10:40:01.855383 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1122 10:40:01.855412 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1122 10:40:01.855418 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1122 10:40:01.855423 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nI1122 10:40:01.855441 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1122 10:40:01.855456 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1122 10:40:01.855461 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1122 10:40:01.855464 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1122 10:40:01.857125 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T10:39:55Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://59001d5969924d417da69f7b385e8ee904bb5289914c6f4a58156a80f2553abd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:43Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e4fdad471fe7d7332fa8c1fc1e4779b934b140f9c808e0af44f05808c56b3e42\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e4fdad471fe7d7332fa8c1fc1e4779b934b140f9c808e0af44f05808c56b3e42\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:39:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:39:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:39:40Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:35Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:35 crc kubenswrapper[4926]: I1122 10:40:35.802588 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:35Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:35 crc kubenswrapper[4926]: I1122 10:40:35.820312 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d4977b14-85c3-4141-9b15-1768f09e8d27\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7f4461b3d09c647d71476b2c089ab47e93fe0e0efb27f8179e476ff3667447f6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5nwn7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fb54559825bae40317f4dbc23c5b4238c4681f9845053656d59b2d2b3e504e1a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5nwn7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:00Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-xr9nd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:35Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:35 crc kubenswrapper[4926]: I1122 10:40:35.837769 4926 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:35 crc kubenswrapper[4926]: I1122 10:40:35.837831 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:35 crc kubenswrapper[4926]: I1122 10:40:35.837848 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:35 crc kubenswrapper[4926]: I1122 10:40:35.837874 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:35 crc kubenswrapper[4926]: I1122 10:40:35.837938 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:35Z","lastTransitionTime":"2025-11-22T10:40:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:35 crc kubenswrapper[4926]: I1122 10:40:35.842737 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-sr572" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fa95257d-7464-4038-b2f3-aa795e4ac425\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b2dd6b2a2fe640f28f973df40cf7a31e3cba962a0d2aec796c6fcfa3892adce1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0a7d49a66e96b6f535cae7cd657acf64164c67b52df055b133183ef25644863d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2
c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0a7d49a66e96b6f535cae7cd657acf64164c67b52df055b133183ef25644863d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://53833aaa04a836cea1e6539fd089b91d92bec1e9c1037d036a09fa83a95b45da\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://53833aaa04a836cea1e6539fd089b91d92bec1e9c1037d036a09fa83a95b45da\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://80eeb7fb332eda9b9472092de548160297c4abdf76a929904fcf3321e992c230\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://80eeb7fb332eda9b9472092de548160297c4abdf76a929904fcf3321e992c230\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:40:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/
secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6293e4f193241ddbe2b4e93ceee670330e9e6cf86889fc9b049a7195da496a28\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6293e4f193241ddbe2b4e93ceee670330e9e6cf86889fc9b049a7195da496a28\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:40:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6ffd06bbdeee7c23dde48a03a975b5a86b258d8f95281abd78c8ce459155bb3f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6ffd06bbdeee7c23dde48a03a975b5a86b258d8f95281abd78c8ce459155bb3f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:40:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ff5dbca8ebeaee6449c5eadf548e32151cc59875e405a574c1f2e0677b40bb19\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ff5dbca8ebeaee6449c5eadf548e32151cc59875e405a574c1f2e0677b40bb19\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:40:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:07Z\\\"}},\\\"volu
meMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:00Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-sr572\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:35Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:35 crc kubenswrapper[4926]: I1122 10:40:35.857646 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-jfbf4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c42b6f47-b1a4-4fee-8681-3b5288370323\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:14Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:14Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:14Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cddxh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cddxh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:14Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-jfbf4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:35Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:35 crc kubenswrapper[4926]: I1122 10:40:35.890115 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1f3bdd4c-e5c5-46f1-8f92-450d892ef2e2\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b68f6c08d1ac65e51054d798dcb1e6affbf5f114ea3848b544d3093168e44150\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://adceb29cfdba4f70773eb38f81b2291080a8a25e6d6ffe7fbbb037118e0ac29f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://29acb771444281287208f7c5aecfe78ed6e70fe2940be31d423c011878bee6c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c37c77ac89f0469ff68b3d5dffc2af7d7b6997b
8975abe244e302baf0f5bbacc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ed65ad0463b5aa72d13756891af20691bff705a94c67a55d45f71af2a88b01e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2f4c96d7281eda14ad3971ee27f9f0f401d12ba8b72e32b65bd9d4935182f980\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2f4c96d7281eda14ad3971ee27f9f0f401d12ba8b72e32b65bd9d4935182f980\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:39:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:39:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ebad6d17a869f5f58a7c43300ceace96025b58ca9621cb6bc23b45d69fe471b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ebad6d17a869f5f58a7c43300ceace96025b58ca9621cb6bc23b45d69fe471b4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:39:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:39:42Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://6075bb148352742b787b1e8df58fe76d06db89303a7c297d07446148532e92cb\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6075bb148352742b787b1e8df58fe76d06db89303a7c297d07446148532e92cb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:39:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:39:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:39:40Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:35Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:35 crc kubenswrapper[4926]: I1122 10:40:35.906793 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://20ee112074738980326d231b1680aa74c920ec797dbfe4752ebfdfb99695caeb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0a5e6c7d3f4dd4bbfdc58c50672c5187d32b136fb9e096c95e786193e1c773b9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36
cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:35Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:35 crc kubenswrapper[4926]: I1122 10:40:35.934352 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-z69nr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"25bc94bb-a5d1-431c-9847-2f6a02997e25\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://20393151c13ddc03e23c32092c70e09c99527d0dd7554c02fc9d2ef8f6e38ba4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://877601ea3a274df873ed3165ae9f9e407093527a55ca10bb9a1c6092312c1b58\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ecaafcef7f92f85be04341712725da108f6dc00c79d4fb72f683186ccaa7c7d7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f2e1c170bdf63c83df5049fbe46ef4de6e08838e961ed3c16904cb9572fdf52f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4dd9889bb8600e958757f70ba92e2e7742f9f4e8ce9bf9c11dc4a2fd4ba0aaa2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f647a7091a4d0acbf116e2583851b13ec4415004447202e892f2b24b7acf5e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f289e2c0a1949f60197796b92dd61a9192993575
c83f759f2030ee0558635eaa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f289e2c0a1949f60197796b92dd61a9192993575c83f759f2030ee0558635eaa\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-22T10:40:29Z\\\",\\\"message\\\":\\\"8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1122 10:40:29.517517 6569 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1122 10:40:29.517528 6569 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1122 10:40:29.517565 6569 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1122 10:40:29.517564 6569 handler.go:208] Removed *v1.Node event handler 2\\\\nI1122 10:40:29.517583 6569 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1122 10:40:29.517597 6569 handler.go:208] Removed *v1.Node event handler 7\\\\nI1122 10:40:29.517615 6569 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1122 10:40:29.517621 6569 handler.go:190] Sending *v1.Pod event handler 3 for removal\\\\nI1122 10:40:29.517625 6569 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI1122 10:40:29.517631 6569 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1122 10:40:29.517641 6569 handler.go:208] Removed *v1.Pod event handler 6\\\\nI1122 10:40:29.517653 6569 handler.go:208] Removed *v1.Pod event handler 3\\\\nI1122 10:40:29.517930 6569 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI1122 10:40:29.517976 6569 factory.go:656] Stopping watch factory\\\\nI1122 10:40:29.517989 6569 ovnkube.go:599] Stopped ovnkube\\\\nI1122 1\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:28Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-z69nr_openshift-ovn-kubernetes(25bc94bb-a5d1-431c-9847-2f6a02997e25)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://19bceeb84e2cce918b75507fb0f9d5e4365fcd0d30da4491b83ed54b3c2d369a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fde011e559f073f261a07a3e55c05d88d1b11a16b6858b74cb8ef79b97c266b1\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fde011e559f073f261a07a3e55c05d88d1b11a16b6858b74cb8ef79b97c266b1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:00Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-z69nr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:35Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:35 crc kubenswrapper[4926]: I1122 10:40:35.941466 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:35 crc kubenswrapper[4926]: I1122 10:40:35.941528 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:35 crc kubenswrapper[4926]: I1122 10:40:35.941562 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:35 crc kubenswrapper[4926]: I1122 10:40:35.941593 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:35 crc kubenswrapper[4926]: I1122 10:40:35.941616 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:35Z","lastTransitionTime":"2025-11-22T10:40:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:40:35 crc kubenswrapper[4926]: I1122 10:40:35.954254 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-cqsd2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"40323bfa-0937-4581-a5f5-bbfe1de066eb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ed82455ff9e2721a5688857be775b66c293c17713ae1ce362da8eca9a0c52c24\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w5t6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2335c0518813a704a36b683366f4b63c953afc0187416234b6682532298f80ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w5t6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:12Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-cqsd2\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:35Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:35 crc kubenswrapper[4926]: I1122 10:40:35.973193 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://136423f9eeb872be0b685dcfee2a81e5f4133ad09607dc53fdfa64d70e87ceab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:35Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:35 crc kubenswrapper[4926]: I1122 10:40:35.995671 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-c6w2q" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"36de2843-6491-4c54-b624-c4a3d328c164\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://13dff3bd18b5adc45fb92f8207a6726011752f430271dabbae9f2e94011f1b08\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-52vzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:00Z\\\"}}\" for pod \"openshift-multus\"/\"multus-c6w2q\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:35Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:36 crc kubenswrapper[4926]: I1122 10:40:36.044312 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:36 crc kubenswrapper[4926]: I1122 10:40:36.044395 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:36 crc kubenswrapper[4926]: I1122 10:40:36.044418 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:36 crc kubenswrapper[4926]: I1122 10:40:36.044489 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:36 crc kubenswrapper[4926]: I1122 10:40:36.044517 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:36Z","lastTransitionTime":"2025-11-22T10:40:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:36 crc kubenswrapper[4926]: I1122 10:40:36.148230 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:36 crc kubenswrapper[4926]: I1122 10:40:36.148287 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:36 crc kubenswrapper[4926]: I1122 10:40:36.148307 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:36 crc kubenswrapper[4926]: I1122 10:40:36.148331 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:36 crc kubenswrapper[4926]: I1122 10:40:36.148351 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:36Z","lastTransitionTime":"2025-11-22T10:40:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:40:36 crc kubenswrapper[4926]: I1122 10:40:36.251672 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:36 crc kubenswrapper[4926]: I1122 10:40:36.251730 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:36 crc kubenswrapper[4926]: I1122 10:40:36.251748 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:36 crc kubenswrapper[4926]: I1122 10:40:36.251772 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:36 crc kubenswrapper[4926]: I1122 10:40:36.251789 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:36Z","lastTransitionTime":"2025-11-22T10:40:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:36 crc kubenswrapper[4926]: I1122 10:40:36.355231 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:36 crc kubenswrapper[4926]: I1122 10:40:36.355331 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:36 crc kubenswrapper[4926]: I1122 10:40:36.355359 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:36 crc kubenswrapper[4926]: I1122 10:40:36.355397 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:36 crc kubenswrapper[4926]: I1122 10:40:36.355419 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:36Z","lastTransitionTime":"2025-11-22T10:40:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:36 crc kubenswrapper[4926]: I1122 10:40:36.458325 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:36 crc kubenswrapper[4926]: I1122 10:40:36.458390 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:36 crc kubenswrapper[4926]: I1122 10:40:36.458406 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:36 crc kubenswrapper[4926]: I1122 10:40:36.458429 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:36 crc kubenswrapper[4926]: I1122 10:40:36.458448 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:36Z","lastTransitionTime":"2025-11-22T10:40:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Nov 22 10:40:36 crc kubenswrapper[4926]: I1122 10:40:36.562229 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 10:40:36 crc kubenswrapper[4926]: I1122 10:40:36.562299 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 10:40:36 crc kubenswrapper[4926]: I1122 10:40:36.562321 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 10:40:36 crc kubenswrapper[4926]: I1122 10:40:36.562350 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 10:40:36 crc kubenswrapper[4926]: I1122 10:40:36.562371 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:36Z","lastTransitionTime":"2025-11-22T10:40:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 10:40:36 crc kubenswrapper[4926]: I1122 10:40:36.581192 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 22 10:40:36 crc kubenswrapper[4926]: I1122 10:40:36.581299 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 22 10:40:36 crc kubenswrapper[4926]: E1122 10:40:36.581436 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 22 10:40:36 crc kubenswrapper[4926]: I1122 10:40:36.581563 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-jfbf4"
Nov 22 10:40:36 crc kubenswrapper[4926]: E1122 10:40:36.581766 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 22 10:40:36 crc kubenswrapper[4926]: E1122 10:40:36.581918 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-jfbf4" podUID="c42b6f47-b1a4-4fee-8681-3b5288370323"
Nov 22 10:40:36 crc kubenswrapper[4926]: I1122 10:40:36.665736 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 10:40:36 crc kubenswrapper[4926]: I1122 10:40:36.665789 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 10:40:36 crc kubenswrapper[4926]: I1122 10:40:36.665807 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 10:40:36 crc kubenswrapper[4926]: I1122 10:40:36.665830 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 10:40:36 crc kubenswrapper[4926]: I1122 10:40:36.665847 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:36Z","lastTransitionTime":"2025-11-22T10:40:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 10:40:36 crc kubenswrapper[4926]: I1122 10:40:36.768373 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 10:40:36 crc kubenswrapper[4926]: I1122 10:40:36.768447 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 10:40:36 crc kubenswrapper[4926]: I1122 10:40:36.768472 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 10:40:36 crc kubenswrapper[4926]: I1122 10:40:36.768502 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 10:40:36 crc kubenswrapper[4926]: I1122 10:40:36.768523 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:36Z","lastTransitionTime":"2025-11-22T10:40:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 10:40:36 crc kubenswrapper[4926]: I1122 10:40:36.872160 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 10:40:36 crc kubenswrapper[4926]: I1122 10:40:36.872228 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 10:40:36 crc kubenswrapper[4926]: I1122 10:40:36.872257 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 10:40:36 crc kubenswrapper[4926]: I1122 10:40:36.872286 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 10:40:36 crc kubenswrapper[4926]: I1122 10:40:36.872308 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:36Z","lastTransitionTime":"2025-11-22T10:40:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 10:40:36 crc kubenswrapper[4926]: I1122 10:40:36.975365 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 10:40:36 crc kubenswrapper[4926]: I1122 10:40:36.975433 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 10:40:36 crc kubenswrapper[4926]: I1122 10:40:36.975451 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 10:40:36 crc kubenswrapper[4926]: I1122 10:40:36.975475 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 10:40:36 crc kubenswrapper[4926]: I1122 10:40:36.975493 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:36Z","lastTransitionTime":"2025-11-22T10:40:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 10:40:37 crc kubenswrapper[4926]: I1122 10:40:37.079264 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 10:40:37 crc kubenswrapper[4926]: I1122 10:40:37.079319 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 10:40:37 crc kubenswrapper[4926]: I1122 10:40:37.079328 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 10:40:37 crc kubenswrapper[4926]: I1122 10:40:37.079341 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 10:40:37 crc kubenswrapper[4926]: I1122 10:40:37.079350 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:37Z","lastTransitionTime":"2025-11-22T10:40:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 10:40:37 crc kubenswrapper[4926]: I1122 10:40:37.182698 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 10:40:37 crc kubenswrapper[4926]: I1122 10:40:37.182763 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 10:40:37 crc kubenswrapper[4926]: I1122 10:40:37.182779 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 10:40:37 crc kubenswrapper[4926]: I1122 10:40:37.182804 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 10:40:37 crc kubenswrapper[4926]: I1122 10:40:37.182825 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:37Z","lastTransitionTime":"2025-11-22T10:40:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 10:40:37 crc kubenswrapper[4926]: I1122 10:40:37.285777 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 10:40:37 crc kubenswrapper[4926]: I1122 10:40:37.285833 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 10:40:37 crc kubenswrapper[4926]: I1122 10:40:37.285851 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 10:40:37 crc kubenswrapper[4926]: I1122 10:40:37.285874 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 10:40:37 crc kubenswrapper[4926]: I1122 10:40:37.285919 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:37Z","lastTransitionTime":"2025-11-22T10:40:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 10:40:37 crc kubenswrapper[4926]: I1122 10:40:37.389264 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 10:40:37 crc kubenswrapper[4926]: I1122 10:40:37.389338 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 10:40:37 crc kubenswrapper[4926]: I1122 10:40:37.389355 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 10:40:37 crc kubenswrapper[4926]: I1122 10:40:37.389380 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 10:40:37 crc kubenswrapper[4926]: I1122 10:40:37.389397 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:37Z","lastTransitionTime":"2025-11-22T10:40:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 10:40:37 crc kubenswrapper[4926]: I1122 10:40:37.491710 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 10:40:37 crc kubenswrapper[4926]: I1122 10:40:37.491778 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 10:40:37 crc kubenswrapper[4926]: I1122 10:40:37.491801 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 10:40:37 crc kubenswrapper[4926]: I1122 10:40:37.491835 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 10:40:37 crc kubenswrapper[4926]: I1122 10:40:37.491859 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:37Z","lastTransitionTime":"2025-11-22T10:40:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 10:40:37 crc kubenswrapper[4926]: I1122 10:40:37.581718 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 22 10:40:37 crc kubenswrapper[4926]: E1122 10:40:37.581981 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 22 10:40:37 crc kubenswrapper[4926]: I1122 10:40:37.594838 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 10:40:37 crc kubenswrapper[4926]: I1122 10:40:37.594882 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 10:40:37 crc kubenswrapper[4926]: I1122 10:40:37.594917 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 10:40:37 crc kubenswrapper[4926]: I1122 10:40:37.594972 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 10:40:37 crc kubenswrapper[4926]: I1122 10:40:37.594987 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:37Z","lastTransitionTime":"2025-11-22T10:40:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 10:40:37 crc kubenswrapper[4926]: I1122 10:40:37.698511 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 10:40:37 crc kubenswrapper[4926]: I1122 10:40:37.698584 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 10:40:37 crc kubenswrapper[4926]: I1122 10:40:37.698601 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 10:40:37 crc kubenswrapper[4926]: I1122 10:40:37.698629 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 10:40:37 crc kubenswrapper[4926]: I1122 10:40:37.698650 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:37Z","lastTransitionTime":"2025-11-22T10:40:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 10:40:37 crc kubenswrapper[4926]: I1122 10:40:37.802067 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 10:40:37 crc kubenswrapper[4926]: I1122 10:40:37.802137 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 10:40:37 crc kubenswrapper[4926]: I1122 10:40:37.802157 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 10:40:37 crc kubenswrapper[4926]: I1122 10:40:37.802219 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 10:40:37 crc kubenswrapper[4926]: I1122 10:40:37.802247 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:37Z","lastTransitionTime":"2025-11-22T10:40:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 10:40:37 crc kubenswrapper[4926]: I1122 10:40:37.905760 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 10:40:37 crc kubenswrapper[4926]: I1122 10:40:37.905841 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 10:40:37 crc kubenswrapper[4926]: I1122 10:40:37.905864 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 10:40:37 crc kubenswrapper[4926]: I1122 10:40:37.905928 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 10:40:37 crc kubenswrapper[4926]: I1122 10:40:37.905955 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:37Z","lastTransitionTime":"2025-11-22T10:40:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 10:40:38 crc kubenswrapper[4926]: I1122 10:40:38.008822 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 10:40:38 crc kubenswrapper[4926]: I1122 10:40:38.008927 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 10:40:38 crc kubenswrapper[4926]: I1122 10:40:38.008948 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 10:40:38 crc kubenswrapper[4926]: I1122 10:40:38.008973 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 10:40:38 crc kubenswrapper[4926]: I1122 10:40:38.008997 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:38Z","lastTransitionTime":"2025-11-22T10:40:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 10:40:38 crc kubenswrapper[4926]: I1122 10:40:38.112290 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 10:40:38 crc kubenswrapper[4926]: I1122 10:40:38.112357 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 10:40:38 crc kubenswrapper[4926]: I1122 10:40:38.112382 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 10:40:38 crc kubenswrapper[4926]: I1122 10:40:38.112411 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 10:40:38 crc kubenswrapper[4926]: I1122 10:40:38.112435 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:38Z","lastTransitionTime":"2025-11-22T10:40:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 10:40:38 crc kubenswrapper[4926]: I1122 10:40:38.215296 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 10:40:38 crc kubenswrapper[4926]: I1122 10:40:38.215360 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 10:40:38 crc kubenswrapper[4926]: I1122 10:40:38.215383 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 10:40:38 crc kubenswrapper[4926]: I1122 10:40:38.215411 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 10:40:38 crc kubenswrapper[4926]: I1122 10:40:38.215434 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:38Z","lastTransitionTime":"2025-11-22T10:40:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 10:40:38 crc kubenswrapper[4926]: I1122 10:40:38.318869 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 10:40:38 crc kubenswrapper[4926]: I1122 10:40:38.318966 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 10:40:38 crc kubenswrapper[4926]: I1122 10:40:38.318988 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 10:40:38 crc kubenswrapper[4926]: I1122 10:40:38.319012 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 10:40:38 crc kubenswrapper[4926]: I1122 10:40:38.319029 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:38Z","lastTransitionTime":"2025-11-22T10:40:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 10:40:38 crc kubenswrapper[4926]: I1122 10:40:38.422372 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 10:40:38 crc kubenswrapper[4926]: I1122 10:40:38.422455 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 10:40:38 crc kubenswrapper[4926]: I1122 10:40:38.422476 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 10:40:38 crc kubenswrapper[4926]: I1122 10:40:38.422499 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 10:40:38 crc kubenswrapper[4926]: I1122 10:40:38.422516 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:38Z","lastTransitionTime":"2025-11-22T10:40:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 10:40:38 crc kubenswrapper[4926]: I1122 10:40:38.525030 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 10:40:38 crc kubenswrapper[4926]: I1122 10:40:38.525115 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 10:40:38 crc kubenswrapper[4926]: I1122 10:40:38.525137 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 10:40:38 crc kubenswrapper[4926]: I1122 10:40:38.525168 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 10:40:38 crc kubenswrapper[4926]: I1122 10:40:38.525199 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:38Z","lastTransitionTime":"2025-11-22T10:40:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 10:40:38 crc kubenswrapper[4926]: I1122 10:40:38.581038 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-jfbf4"
Nov 22 10:40:38 crc kubenswrapper[4926]: I1122 10:40:38.581109 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 22 10:40:38 crc kubenswrapper[4926]: I1122 10:40:38.581046 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 22 10:40:38 crc kubenswrapper[4926]: E1122 10:40:38.581296 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-jfbf4" podUID="c42b6f47-b1a4-4fee-8681-3b5288370323"
Nov 22 10:40:38 crc kubenswrapper[4926]: E1122 10:40:38.581427 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 22 10:40:38 crc kubenswrapper[4926]: E1122 10:40:38.581585 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 22 10:40:38 crc kubenswrapper[4926]: I1122 10:40:38.627588 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 10:40:38 crc kubenswrapper[4926]: I1122 10:40:38.627663 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 10:40:38 crc kubenswrapper[4926]: I1122 10:40:38.627685 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 10:40:38 crc kubenswrapper[4926]: I1122 10:40:38.627707 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 10:40:38 crc kubenswrapper[4926]: I1122 10:40:38.627725 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:38Z","lastTransitionTime":"2025-11-22T10:40:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 10:40:38 crc kubenswrapper[4926]: I1122 10:40:38.730111 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 10:40:38 crc kubenswrapper[4926]: I1122 10:40:38.730195 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 10:40:38 crc kubenswrapper[4926]: I1122 10:40:38.730227 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 10:40:38 crc kubenswrapper[4926]: I1122 10:40:38.730255 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 10:40:38 crc kubenswrapper[4926]: I1122 10:40:38.730276 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:38Z","lastTransitionTime":"2025-11-22T10:40:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 10:40:38 crc kubenswrapper[4926]: I1122 10:40:38.833384 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 10:40:38 crc kubenswrapper[4926]: I1122 10:40:38.833454 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 10:40:38 crc kubenswrapper[4926]: I1122 10:40:38.833476 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 10:40:38 crc kubenswrapper[4926]: I1122 10:40:38.833510 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 10:40:38 crc kubenswrapper[4926]: I1122 10:40:38.833533 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:38Z","lastTransitionTime":"2025-11-22T10:40:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 10:40:38 crc kubenswrapper[4926]: I1122 10:40:38.936364 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 10:40:38 crc kubenswrapper[4926]: I1122 10:40:38.936470 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 10:40:38 crc kubenswrapper[4926]: I1122 10:40:38.936488 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 10:40:38 crc kubenswrapper[4926]: I1122 10:40:38.936515 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 10:40:38 crc kubenswrapper[4926]: I1122 10:40:38.936535 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:38Z","lastTransitionTime":"2025-11-22T10:40:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 10:40:39 crc kubenswrapper[4926]: I1122 10:40:39.039426 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 10:40:39 crc kubenswrapper[4926]: I1122 10:40:39.039546 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 10:40:39 crc kubenswrapper[4926]: I1122 10:40:39.039571 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 10:40:39 crc kubenswrapper[4926]: I1122 10:40:39.039638 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 10:40:39 crc kubenswrapper[4926]: I1122 10:40:39.039662 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:39Z","lastTransitionTime":"2025-11-22T10:40:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 10:40:39 crc kubenswrapper[4926]: I1122 10:40:39.143476 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 10:40:39 crc kubenswrapper[4926]: I1122 10:40:39.143552 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 10:40:39 crc kubenswrapper[4926]: I1122 10:40:39.143569 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 10:40:39 crc kubenswrapper[4926]: I1122 10:40:39.143595 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 10:40:39 crc kubenswrapper[4926]: I1122 10:40:39.143612 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:39Z","lastTransitionTime":"2025-11-22T10:40:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 10:40:39 crc kubenswrapper[4926]: I1122 10:40:39.246736 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 10:40:39 crc kubenswrapper[4926]: I1122 10:40:39.246812 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 10:40:39 crc kubenswrapper[4926]: I1122 10:40:39.246835 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 10:40:39 crc kubenswrapper[4926]: I1122 10:40:39.246869 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 10:40:39 crc kubenswrapper[4926]: I1122 10:40:39.246945 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:39Z","lastTransitionTime":"2025-11-22T10:40:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 10:40:39 crc kubenswrapper[4926]: I1122 10:40:39.350120 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 10:40:39 crc kubenswrapper[4926]: I1122 10:40:39.350183 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 10:40:39 crc kubenswrapper[4926]: I1122 10:40:39.350200 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 10:40:39 crc kubenswrapper[4926]: I1122 10:40:39.350222 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 10:40:39 crc kubenswrapper[4926]: I1122 10:40:39.350239 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:39Z","lastTransitionTime":"2025-11-22T10:40:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 10:40:39 crc kubenswrapper[4926]: I1122 10:40:39.453706 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 10:40:39 crc kubenswrapper[4926]: I1122 10:40:39.453762 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 10:40:39 crc kubenswrapper[4926]: I1122 10:40:39.453778 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 10:40:39 crc kubenswrapper[4926]: I1122 10:40:39.453800 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 10:40:39 crc kubenswrapper[4926]: I1122 10:40:39.453817 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:39Z","lastTransitionTime":"2025-11-22T10:40:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 10:40:39 crc kubenswrapper[4926]: I1122 10:40:39.562663 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 10:40:39 crc kubenswrapper[4926]: I1122 10:40:39.562744 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 10:40:39 crc kubenswrapper[4926]: I1122 10:40:39.562763 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 10:40:39 crc kubenswrapper[4926]: I1122 10:40:39.562790 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 10:40:39 crc kubenswrapper[4926]: I1122 10:40:39.562806 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:39Z","lastTransitionTime":"2025-11-22T10:40:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 10:40:39 crc kubenswrapper[4926]: I1122 10:40:39.581330 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 22 10:40:39 crc kubenswrapper[4926]: E1122 10:40:39.581511 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 22 10:40:39 crc kubenswrapper[4926]: I1122 10:40:39.665777 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 10:40:39 crc kubenswrapper[4926]: I1122 10:40:39.665847 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 10:40:39 crc kubenswrapper[4926]: I1122 10:40:39.665868 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 10:40:39 crc kubenswrapper[4926]: I1122 10:40:39.665926 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 10:40:39 crc kubenswrapper[4926]: I1122 10:40:39.665945 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:39Z","lastTransitionTime":"2025-11-22T10:40:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 10:40:39 crc kubenswrapper[4926]: I1122 10:40:39.769833 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 10:40:39 crc kubenswrapper[4926]: I1122 10:40:39.769918 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 10:40:39 crc kubenswrapper[4926]: I1122 10:40:39.769931 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 10:40:39 crc kubenswrapper[4926]: I1122 10:40:39.769955 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 10:40:39 crc kubenswrapper[4926]: I1122 10:40:39.769969 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:39Z","lastTransitionTime":"2025-11-22T10:40:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 10:40:39 crc kubenswrapper[4926]: I1122 10:40:39.873443 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 10:40:39 crc kubenswrapper[4926]: I1122 10:40:39.873531 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 10:40:39 crc kubenswrapper[4926]: I1122 10:40:39.873556 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 10:40:39 crc kubenswrapper[4926]: I1122 10:40:39.873587 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 10:40:39 crc kubenswrapper[4926]: I1122 10:40:39.873606 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:39Z","lastTransitionTime":"2025-11-22T10:40:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 10:40:39 crc kubenswrapper[4926]: I1122 10:40:39.977618 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 10:40:39 crc kubenswrapper[4926]: I1122 10:40:39.977678 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 10:40:39 crc kubenswrapper[4926]: I1122 10:40:39.977695 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 10:40:39 crc kubenswrapper[4926]: I1122 10:40:39.977723 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 10:40:39 crc kubenswrapper[4926]: I1122 10:40:39.977741 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:39Z","lastTransitionTime":"2025-11-22T10:40:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 10:40:40 crc kubenswrapper[4926]: I1122 10:40:40.081140 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 10:40:40 crc kubenswrapper[4926]: I1122 10:40:40.081212 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 10:40:40 crc kubenswrapper[4926]: I1122 10:40:40.081234 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 10:40:40 crc kubenswrapper[4926]: I1122 10:40:40.081268 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 10:40:40 crc kubenswrapper[4926]: I1122 10:40:40.081290 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:40Z","lastTransitionTime":"2025-11-22T10:40:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 10:40:40 crc kubenswrapper[4926]: I1122 10:40:40.185238 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 10:40:40 crc kubenswrapper[4926]: I1122 10:40:40.185310 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 10:40:40 crc kubenswrapper[4926]: I1122 10:40:40.185327 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 10:40:40 crc kubenswrapper[4926]: I1122 10:40:40.185353 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 10:40:40 crc kubenswrapper[4926]: I1122 10:40:40.185373 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:40Z","lastTransitionTime":"2025-11-22T10:40:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 10:40:40 crc kubenswrapper[4926]: I1122 10:40:40.287951 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 10:40:40 crc kubenswrapper[4926]: I1122 10:40:40.288010 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 10:40:40 crc kubenswrapper[4926]: I1122 10:40:40.288024 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 10:40:40 crc kubenswrapper[4926]: I1122 10:40:40.288041 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 10:40:40 crc kubenswrapper[4926]: I1122 10:40:40.288053 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:40Z","lastTransitionTime":"2025-11-22T10:40:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 10:40:40 crc kubenswrapper[4926]: I1122 10:40:40.391762 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 10:40:40 crc kubenswrapper[4926]: I1122 10:40:40.391805 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 10:40:40 crc kubenswrapper[4926]: I1122 10:40:40.391823 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 10:40:40 crc kubenswrapper[4926]: I1122 10:40:40.391846 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 10:40:40 crc kubenswrapper[4926]: I1122 10:40:40.391864 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:40Z","lastTransitionTime":"2025-11-22T10:40:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 10:40:40 crc kubenswrapper[4926]: I1122 10:40:40.494196 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 10:40:40 crc kubenswrapper[4926]: I1122 10:40:40.494510 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 10:40:40 crc kubenswrapper[4926]: I1122 10:40:40.494532 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 10:40:40 crc kubenswrapper[4926]: I1122 10:40:40.494558 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 10:40:40 crc kubenswrapper[4926]: I1122 10:40:40.494577 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:40Z","lastTransitionTime":"2025-11-22T10:40:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 10:40:40 crc kubenswrapper[4926]: I1122 10:40:40.580974 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 22 10:40:40 crc kubenswrapper[4926]: I1122 10:40:40.581065 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-jfbf4"
Nov 22 10:40:40 crc kubenswrapper[4926]: E1122 10:40:40.581293 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 22 10:40:40 crc kubenswrapper[4926]: I1122 10:40:40.581562 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 22 10:40:40 crc kubenswrapper[4926]: E1122 10:40:40.581662 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-jfbf4" podUID="c42b6f47-b1a4-4fee-8681-3b5288370323"
Nov 22 10:40:40 crc kubenswrapper[4926]: E1122 10:40:40.582169 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 22 10:40:40 crc kubenswrapper[4926]: I1122 10:40:40.599518 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 10:40:40 crc kubenswrapper[4926]: I1122 10:40:40.599585 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 10:40:40 crc kubenswrapper[4926]: I1122 10:40:40.599602 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 10:40:40 crc kubenswrapper[4926]: I1122 10:40:40.599627 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 10:40:40 crc kubenswrapper[4926]: I1122 10:40:40.599644 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:40Z","lastTransitionTime":"2025-11-22T10:40:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 10:40:40 crc kubenswrapper[4926]: I1122 10:40:40.607194 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"19e2ae79-41cf-4653-aa92-cc410145852b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5b4679473c2c11fecbd8496e1cbeb15b3a2e3fff5e5b5f127e0d1f1d7b3e5b70\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8066b411b165bb6f3aa9fac7c4f7d5687e58fba56ff2afd14c7a351ba3877483\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://96a716433f83f8041fc5f29383937a1b49d73701286739632a5c07a7e4a42077\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ff433d4efb9f77a47f79a39aa609b09a40a642f554e19b8fce0c84ad96a5e34\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:39:40Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:40Z is after 2025-08-24T17:21:41Z"
Nov 22 10:40:40 crc kubenswrapper[4926]: I1122 10:40:40.625061 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-wqf9b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"385427b5-fb45-42dd-8ff8-a4c7cdad6157\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://06207f52a9095cf1830a6ad91952c7efa774aa15a2fca873dc09b6b995e26401\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4rrk8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:00Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-wqf9b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:40Z is after 2025-08-24T17:21:41Z"
Nov 22 10:40:40 crc kubenswrapper[4926]: I1122 10:40:40.642730 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2bff8ee1644ff7cd9bee279e5c641b5d9c55e0546fbc7d1f2330e4e86aee0c37\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:40Z is after 2025-08-24T17:21:41Z"
Nov 22 10:40:40 crc kubenswrapper[4926]: I1122 10:40:40.660278 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:40Z is after 2025-08-24T17:21:41Z"
Nov 22 10:40:40 crc kubenswrapper[4926]: I1122 10:40:40.675487 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:40Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:40 crc kubenswrapper[4926]: I1122 10:40:40.697765 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-4sppr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f66b576d-83a6-4919-a918-7b075f35881e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a4a164ad2484a9c0b09c8e6b174448dedfe84a017ada69b19429d90621540080\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jwtrf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:01Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-4sppr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify 
certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:40Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:40 crc kubenswrapper[4926]: I1122 10:40:40.702085 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:40 crc kubenswrapper[4926]: I1122 10:40:40.702155 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:40 crc kubenswrapper[4926]: I1122 10:40:40.702177 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:40 crc kubenswrapper[4926]: I1122 10:40:40.702205 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:40 crc kubenswrapper[4926]: I1122 10:40:40.702225 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:40Z","lastTransitionTime":"2025-11-22T10:40:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:40 crc kubenswrapper[4926]: I1122 10:40:40.719332 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8bc26a84-5939-4708-97f7-36fb5b1d1f27\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bdc617bc3d316106b3e66df8521ae9165a90df081b92c4a6c167895006a667a4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8bdb9d01c359b36d5ea1517197ab1db0746267b371b5aaf6ae9af7157af6f76\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\
\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f92968d6a164ebcee515ead5d8704ab04430b65f29cec21c10cecf2b99079737\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://99cddd2a619c5ea76699afd981ea6b0ccab8df382e64f7f9b5399ac9091da32a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e4845e8b99f2def354e4a055d407495a578db4994aef6fb1c70b91760bbf4e2a\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"message\\\":\\\"le observer\\\\nW1122 10:40:01.221686 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1122 10:40:01.221835 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1122 10:40:01.223195 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3742639324/tls.crt::/tmp/serving-cert-3742639324/tls.key\\\\\\\"\\\\nI1122 10:40:01.845350 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1122 10:40:01.848213 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1122 10:40:01.848231 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1122 10:40:01.848251 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1122 10:40:01.848258 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1122 10:40:01.855383 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1122 10:40:01.855412 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1122 10:40:01.855418 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1122 10:40:01.855423 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nI1122 10:40:01.855441 
1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1122 10:40:01.855456 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1122 10:40:01.855461 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1122 10:40:01.855464 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1122 10:40:01.857125 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T10:39:55Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://59001d5969924d417da69f7b385e8ee904bb5289914c6f4a58156a80f2553abd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:43Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e4fdad471fe7d7332fa8c1fc1e4779b934b140f9c808e0af44f05808c56b3e42\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e4fdad471fe7d7332fa8c1fc1e4779b934b140f9c808e0af44f05808c56b3e42\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:39:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:39:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:39:40Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:40Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:40 crc kubenswrapper[4926]: I1122 10:40:40.735100 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"000da1af-72cf-48f8-a5f6-ba658a30f2f2\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cb848b9bd67cb4ffab830dcdca81d4ddcc4fdbdda65202dbfd4632f8fc0bdfc5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a34606ee4262c2a39b7e799d969d8a76024895265fba033ffdbbd036c9e80fe3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0b5507d01d4acb2f96c9c8b4978d67033fe6e9f01a035089649879219c4e9902\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6ec673ef3eadee493ad9112c2b920087ff03986cb55933071e6be31f987ff8b1\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6ec673ef3eadee493ad9112c2b920087ff03986cb55933071e6be31f987ff8b1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:39:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:39:41Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:39:40Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:40Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:40 crc kubenswrapper[4926]: I1122 10:40:40.753824 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:40Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:40 crc kubenswrapper[4926]: I1122 10:40:40.766927 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d4977b14-85c3-4141-9b15-1768f09e8d27\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7f4461b3d09c647d71476b2c089ab47e93fe0e0efb27f8179e476ff3667447f6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5nwn7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fb54559825bae40317f4dbc23c5b4238c4681f9845053656d59b2d2b3e504e1a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae3
4a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5nwn7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:00Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-xr9nd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:40Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:40 crc kubenswrapper[4926]: I1122 10:40:40.787583 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-sr572" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fa95257d-7464-4038-b2f3-aa795e4ac425\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b2dd6b2a2fe640f28f973df40cf7a31e3cba962a0d2aec796c6fcfa3892adce1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0a7d49a66e96b6f535cae7cd657acf64164c67b52df055b133183ef25644863d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\"
:\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0a7d49a66e96b6f535cae7cd657acf64164c67b52df055b133183ef25644863d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://53833aaa04a836cea1e6539fd089b91d92bec1e9c1037d036a09fa83a95b45da\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://53833aaa04a836cea1e6539fd089b91d92bec1e9c1037d036a09fa83a95b45da\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://80eeb7fb332eda9b9472092de548160297c4abdf76a929904fcf3321e992c230\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://80eeb7fb332eda9b9472092de548160297c4abdf76a929904fcf3321e992c230\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:40:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\
"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6293e4f193241ddbe2b4e93ceee670330e9e6cf86889fc9b049a7195da496a28\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6293e4f193241ddbe2b4e93ceee670330e9e6cf86889fc9b049a7195da496a28\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:40:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6ffd06bbdeee7c23dde48a03a975b5a86b258d8f95281abd78c8ce459155bb3f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6ffd06bbdeee7c23dde48a03a975b5a86b258d8f95281abd78c8ce459155bb3f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:40:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ff5dbca8ebeaee6449c5eadf548e32151cc59875e405a574c1f2e0677b40bb19\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ff5dbca8ebeaee6449c5eadf548e32151cc59875e405a574c1f2e0677b40bb19\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:40:07Z
\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:00Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-sr572\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:40Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:40 crc kubenswrapper[4926]: I1122 10:40:40.801749 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-jfbf4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c42b6f47-b1a4-4fee-8681-3b5288370323\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:14Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:14Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:14Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cddxh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cddxh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:14Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-jfbf4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:40Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:40 crc kubenswrapper[4926]: I1122 10:40:40.804867 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:40 crc kubenswrapper[4926]: I1122 10:40:40.805050 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:40 crc kubenswrapper[4926]: I1122 10:40:40.805235 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:40 crc kubenswrapper[4926]: I1122 10:40:40.805615 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:40 crc kubenswrapper[4926]: I1122 10:40:40.806102 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:40Z","lastTransitionTime":"2025-11-22T10:40:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:40:40 crc kubenswrapper[4926]: I1122 10:40:40.824806 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1f3bdd4c-e5c5-46f1-8f92-450d892ef2e2\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b68f6c08d1ac65e51054d798dcb1e6affbf5f114ea3848b544d3093168e44150\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://adceb29cfdba4f70773eb38f81b2291080a8a25e6d6ffe7fbbb037118e0ac29f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://29acb771444281287208f7c5aecfe78ed6e70fe2940be31d423c011878bee6c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\
":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c37c77ac89f0469ff68b3d5dffc2af7d7b6997b8975abe244e302baf0f5bbacc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ed65ad0463b5aa72d13756891af20691bff705a94c67a55d45f71af2a88b01e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2f4c96d7281eda14ad3971ee27f9f0f401d12ba8b72e32b65bd9d4935182f980\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2f4c96d7281eda14ad3971ee27f9f0f401d12ba8b72e32b65bd9d4935182f980\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:39:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:39:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ebad6d17a869f5f58a7c43300ceace96025b58ca9621cb6bc23b45d69fe471b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ebad6d17a869f5f58a7c43300ceace96025b58ca9621cb6bc23b45d69fe471b4\\\",\\\"exitCode\\\":0,\\\"finished
At\\\":\\\"2025-11-22T10:39:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:39:42Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://6075bb148352742b787b1e8df58fe76d06db89303a7c297d07446148532e92cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6075bb148352742b787b1e8df58fe76d06db89303a7c297d07446148532e92cb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:39:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:39:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:39:40Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:40Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:40 crc kubenswrapper[4926]: I1122 10:40:40.839619 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://20ee112074738980326d231b1680aa74c920ec797dbfe4752ebfdfb99695caeb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0a5e6c7d3f4dd4bbfdc58c50672c5187d32b136fb9e096c95e786193e1c773b9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:40Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:40 crc kubenswrapper[4926]: I1122 10:40:40.861563 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-z69nr" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"25bc94bb-a5d1-431c-9847-2f6a02997e25\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://20393151c13ddc03e23c32092c70e09c99527d0dd7554c02fc9d2ef8f6e38ba4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://877601ea3a274df873ed3165ae9f9e407093527a55ca10bb9a1c6092312c1b58\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ecaafcef7f92f85be04341712725da108f6dc00c79d4fb72f683186ccaa7c7d7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f2e1c170bdf63c83df5049fbe46ef4de6e08838e961ed3c16904cb9572fdf52f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4dd9889bb8600e958757f70ba92e2e7742f9f4e8ce9bf9c11dc4a2fd4ba0aaa2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f647a7091a4d0acbf116e2583851b13ec4415004447202e892f2b24b7acf5e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f289e2c0a1949f60197796b92dd61a9192993575c83f759f2030ee0558635eaa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f289e2c0a1949f60197796b92dd61a9192993575c83f759f2030ee0558635eaa\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-22T10:40:29Z\\\",\\\"message\\\":\\\"8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1122 10:40:29.517517 6569 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1122 10:40:29.517528 6569 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1122 10:40:29.517565 6569 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1122 10:40:29.517564 6569 handler.go:208] Removed *v1.Node event handler 2\\\\nI1122 10:40:29.517583 6569 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1122 10:40:29.517597 6569 handler.go:208] Removed *v1.Node event handler 7\\\\nI1122 10:40:29.517615 6569 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1122 10:40:29.517621 6569 handler.go:190] Sending *v1.Pod event handler 3 for removal\\\\nI1122 10:40:29.517625 6569 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI1122 10:40:29.517631 6569 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1122 10:40:29.517641 6569 handler.go:208] Removed *v1.Pod event handler 6\\\\nI1122 10:40:29.517653 6569 handler.go:208] Removed *v1.Pod event handler 3\\\\nI1122 10:40:29.517930 6569 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI1122 10:40:29.517976 6569 factory.go:656] Stopping watch factory\\\\nI1122 10:40:29.517989 6569 ovnkube.go:599] Stopped ovnkube\\\\nI1122 1\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:28Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-z69nr_openshift-ovn-kubernetes(25bc94bb-a5d1-431c-9847-2f6a02997e25)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://19bceeb84e2cce918b75507fb0f9d5e4365fcd0d30da4491b83ed54b3c2d369a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fde011e559f073f261a07a3e55c05d88d1b11a16b6858b74cb8ef79b97c266b1\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fde011e559f073f261a07a3e55c05d88d1b11a16b6858b74cb8ef79b97c266b1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:00Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-z69nr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:40Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:40 crc kubenswrapper[4926]: I1122 10:40:40.874330 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-cqsd2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"40323bfa-0937-4581-a5f5-bbfe1de066eb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ed82455ff9e2721a5688857be775b66c293c17713ae1ce362da8eca9a0c52c24\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w5t6w
\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2335c0518813a704a36b683366f4b63c953afc0187416234b6682532298f80ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w5t6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:12Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-cqsd2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:40Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:40 crc kubenswrapper[4926]: I1122 10:40:40.886778 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://136423f9eeb872be0b685dcfee2a81e5f4133ad09607dc53fdfa64d70e87ceab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:40Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:40 crc kubenswrapper[4926]: I1122 10:40:40.900251 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-c6w2q" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"36de2843-6491-4c54-b624-c4a3d328c164\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://13dff3bd18b5adc45fb92f8207a6726011752f430271dabbae9f2e94011f1b08\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-52vzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:00Z\\\"}}\" for pod \"openshift-multus\"/\"multus-c6w2q\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:40Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:40 crc kubenswrapper[4926]: I1122 10:40:40.908377 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:40 crc kubenswrapper[4926]: I1122 10:40:40.908455 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:40 crc kubenswrapper[4926]: I1122 10:40:40.908479 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:40 crc kubenswrapper[4926]: I1122 10:40:40.908512 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:40 crc kubenswrapper[4926]: I1122 10:40:40.908537 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:40Z","lastTransitionTime":"2025-11-22T10:40:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:41 crc kubenswrapper[4926]: I1122 10:40:41.011509 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:41 crc kubenswrapper[4926]: I1122 10:40:41.011587 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:41 crc kubenswrapper[4926]: I1122 10:40:41.011619 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:41 crc kubenswrapper[4926]: I1122 10:40:41.011649 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:41 crc kubenswrapper[4926]: I1122 10:40:41.011672 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:41Z","lastTransitionTime":"2025-11-22T10:40:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:40:41 crc kubenswrapper[4926]: I1122 10:40:41.110471 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:41 crc kubenswrapper[4926]: I1122 10:40:41.110527 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:41 crc kubenswrapper[4926]: I1122 10:40:41.110559 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:41 crc kubenswrapper[4926]: I1122 10:40:41.110584 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:41 crc kubenswrapper[4926]: I1122 10:40:41.110604 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:41Z","lastTransitionTime":"2025-11-22T10:40:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:41 crc kubenswrapper[4926]: E1122 10:40:41.134086 4926 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404548Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865348Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:40:41Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:41Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:40:41Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:41Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:40:41Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:41Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:40:41Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:41Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"aeb560ef-1eb5-4732-98e3-250b723cbd1b\\\",\\\"systemUUID\\\":\\\"16dcb71b-1e1f-4d77-bb74-17aa213c9052\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:41Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:41 crc kubenswrapper[4926]: I1122 10:40:41.139783 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:41 crc kubenswrapper[4926]: I1122 10:40:41.139847 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 22 10:40:41 crc kubenswrapper[4926]: I1122 10:40:41.139874 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:41 crc kubenswrapper[4926]: I1122 10:40:41.139933 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:41 crc kubenswrapper[4926]: I1122 10:40:41.139958 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:41Z","lastTransitionTime":"2025-11-22T10:40:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:41 crc kubenswrapper[4926]: E1122 10:40:41.160081 4926 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404548Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865348Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:40:41Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:41Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:40:41Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:41Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:40:41Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:41Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:40:41Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:41Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"aeb560ef-1eb5-4732-98e3-250b723cbd1b\\\",\\\"systemUUID\\\":\\\"16dcb71b-1e1f-4d77-bb74-17aa213c9052\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:41Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:41 crc kubenswrapper[4926]: I1122 10:40:41.165915 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:41 crc kubenswrapper[4926]: I1122 10:40:41.165980 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 22 10:40:41 crc kubenswrapper[4926]: I1122 10:40:41.165992 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:41 crc kubenswrapper[4926]: I1122 10:40:41.166017 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:41 crc kubenswrapper[4926]: I1122 10:40:41.166033 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:41Z","lastTransitionTime":"2025-11-22T10:40:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:41 crc kubenswrapper[4926]: E1122 10:40:41.183775 4926 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404548Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865348Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:40:41Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:41Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:40:41Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:41Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:40:41Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:41Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:40:41Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:41Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"aeb560ef-1eb5-4732-98e3-250b723cbd1b\\\",\\\"systemUUID\\\":\\\"16dcb71b-1e1f-4d77-bb74-17aa213c9052\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:41Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:41 crc kubenswrapper[4926]: I1122 10:40:41.190290 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:41 crc kubenswrapper[4926]: I1122 10:40:41.190350 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 22 10:40:41 crc kubenswrapper[4926]: I1122 10:40:41.190370 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:41 crc kubenswrapper[4926]: I1122 10:40:41.190399 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:41 crc kubenswrapper[4926]: I1122 10:40:41.190417 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:41Z","lastTransitionTime":"2025-11-22T10:40:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:41 crc kubenswrapper[4926]: E1122 10:40:41.208833 4926 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404548Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865348Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:40:41Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:41Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:40:41Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:41Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:40:41Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:41Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:40:41Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:41Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"aeb560ef-1eb5-4732-98e3-250b723cbd1b\\\",\\\"systemUUID\\\":\\\"16dcb71b-1e1f-4d77-bb74-17aa213c9052\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:41Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:41 crc kubenswrapper[4926]: I1122 10:40:41.217452 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:41 crc kubenswrapper[4926]: I1122 10:40:41.217511 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
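Every "Error updating node status, will retry" entry above fails for the same reason: the node.network-node-identity.openshift.io admission webhook at https://127.0.0.1:9743 presents a serving certificate whose notAfter (2025-08-24T17:21:41Z) is roughly three months before the kubelet's clock (2025-11-22T10:40:41Z), so TLS verification rejects the connection before the status patch is ever delivered. In CRC this pattern typically appears when the VM is started long after its baked-in certificates have expired and rotation has not yet completed. The following is a minimal Python sketch of the x509 validity-window rule behind Go's "certificate has expired or is not yet valid" message; the timestamps are copied from the log, and nothing here is kubelet's actual Go code.

    from datetime import datetime, timezone

    # Timestamps copied from the log entries above.
    not_after = datetime(2025, 8, 24, 17, 21, 41, tzinfo=timezone.utc)  # webhook cert notAfter
    now = datetime(2025, 11, 22, 10, 40, 41, tzinfo=timezone.utc)       # kubelet's clock

    # x509 validity rule: a certificate is usable only while
    # notBefore <= now <= notAfter. (notBefore is omitted here because
    # the log only reports the notAfter violation.)
    if now > not_after:
        print(f"certificate has expired: current time {now:%Y-%m-%dT%H:%M:%SZ} "
              f"is after {not_after:%Y-%m-%dT%H:%M:%SZ}")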
event="NodeHasNoDiskPressure" Nov 22 10:40:41 crc kubenswrapper[4926]: I1122 10:40:41.217530 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:41 crc kubenswrapper[4926]: I1122 10:40:41.217568 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:41 crc kubenswrapper[4926]: I1122 10:40:41.217586 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:41Z","lastTransitionTime":"2025-11-22T10:40:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:41 crc kubenswrapper[4926]: E1122 10:40:41.238063 4926 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404548Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865348Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:40:41Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:41Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:40:41Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:41Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:40:41Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:41Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:40:41Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:41Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"aeb560ef-1eb5-4732-98e3-250b723cbd1b\\\",\\\"systemUUID\\\":\\\"16dcb71b-1e1f-4d77-bb74-17aa213c9052\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:41Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:41 crc kubenswrapper[4926]: E1122 10:40:41.238294 4926 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 22 10:40:41 crc kubenswrapper[4926]: I1122 10:40:41.240917 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
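The kubelet does not retry indefinitely: once its per-sync retry budget is spent it logs "Unable to update node status" and waits for the next sync period. Upstream kubelet caps these attempts with a small constant (nodeStatusUpdateRetry, 5 in recent releases), which matches the run of failed attempts above. Below is a minimal Python sketch of that retry-then-give-up pattern, with patch_node_status as a hypothetical stub for the PATCH call that the expired webhook certificate keeps rejecting; it is an illustration, not the Go implementation.

    NODE_STATUS_UPDATE_RETRY = 5  # mirrors kubelet's nodeStatusUpdateRetry constant

    def patch_node_status() -> None:
        # Hypothetical stub: the real code PATCHes the Node object via the
        # API server, which fails here because the admission webhook's TLS
        # certificate has expired.
        raise RuntimeError("failed calling webhook node.network-node-identity.openshift.io")

    def update_node_status() -> None:
        for attempt in range(NODE_STATUS_UPDATE_RETRY):
            try:
                patch_node_status()
                return
            except RuntimeError as err:
                print(f"Error updating node status, will retry (attempt {attempt + 1}): {err}")
        # All attempts failed; surface the terminal error seen in the log.
        print("Unable to update node status: update node status exceeds retry count")

    update_node_status()

Nov 22 10:40:41 crc kubenswrapper[4926]: I1122 10:40:41.240917 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc"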
event="NodeHasSufficientMemory" Nov 22 10:40:41 crc kubenswrapper[4926]: I1122 10:40:41.240986 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:41 crc kubenswrapper[4926]: I1122 10:40:41.241004 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:41 crc kubenswrapper[4926]: I1122 10:40:41.241035 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:41 crc kubenswrapper[4926]: I1122 10:40:41.241054 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:41Z","lastTransitionTime":"2025-11-22T10:40:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:41 crc kubenswrapper[4926]: I1122 10:40:41.344440 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:41 crc kubenswrapper[4926]: I1122 10:40:41.344507 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:41 crc kubenswrapper[4926]: I1122 10:40:41.344527 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:41 crc kubenswrapper[4926]: I1122 10:40:41.344558 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:41 crc kubenswrapper[4926]: I1122 10:40:41.344580 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:41Z","lastTransitionTime":"2025-11-22T10:40:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:41 crc kubenswrapper[4926]: I1122 10:40:41.448858 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:41 crc kubenswrapper[4926]: I1122 10:40:41.448941 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:41 crc kubenswrapper[4926]: I1122 10:40:41.448961 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:41 crc kubenswrapper[4926]: I1122 10:40:41.448985 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:41 crc kubenswrapper[4926]: I1122 10:40:41.449001 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:41Z","lastTransitionTime":"2025-11-22T10:40:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:40:41 crc kubenswrapper[4926]: I1122 10:40:41.551724 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:41 crc kubenswrapper[4926]: I1122 10:40:41.552233 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:41 crc kubenswrapper[4926]: I1122 10:40:41.552295 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:41 crc kubenswrapper[4926]: I1122 10:40:41.552328 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:41 crc kubenswrapper[4926]: I1122 10:40:41.552349 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:41Z","lastTransitionTime":"2025-11-22T10:40:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:41 crc kubenswrapper[4926]: I1122 10:40:41.581998 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 22 10:40:41 crc kubenswrapper[4926]: E1122 10:40:41.582210 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 22 10:40:41 crc kubenswrapper[4926]: I1122 10:40:41.655153 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:41 crc kubenswrapper[4926]: I1122 10:40:41.655226 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:41 crc kubenswrapper[4926]: I1122 10:40:41.655251 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:41 crc kubenswrapper[4926]: I1122 10:40:41.655274 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:41 crc kubenswrapper[4926]: I1122 10:40:41.655291 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:41Z","lastTransitionTime":"2025-11-22T10:40:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:40:41 crc kubenswrapper[4926]: I1122 10:40:41.759161 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:41 crc kubenswrapper[4926]: I1122 10:40:41.761451 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:41 crc kubenswrapper[4926]: I1122 10:40:41.761744 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:41 crc kubenswrapper[4926]: I1122 10:40:41.762162 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:41 crc kubenswrapper[4926]: I1122 10:40:41.762363 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:41Z","lastTransitionTime":"2025-11-22T10:40:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:41 crc kubenswrapper[4926]: I1122 10:40:41.866045 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:41 crc kubenswrapper[4926]: I1122 10:40:41.866093 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:41 crc kubenswrapper[4926]: I1122 10:40:41.866109 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:41 crc kubenswrapper[4926]: I1122 10:40:41.866131 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:41 crc kubenswrapper[4926]: I1122 10:40:41.866149 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:41Z","lastTransitionTime":"2025-11-22T10:40:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:41 crc kubenswrapper[4926]: I1122 10:40:41.968944 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:41 crc kubenswrapper[4926]: I1122 10:40:41.968987 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:41 crc kubenswrapper[4926]: I1122 10:40:41.968997 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:41 crc kubenswrapper[4926]: I1122 10:40:41.969013 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:41 crc kubenswrapper[4926]: I1122 10:40:41.969024 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:41Z","lastTransitionTime":"2025-11-22T10:40:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:40:42 crc kubenswrapper[4926]: I1122 10:40:42.071822 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:42 crc kubenswrapper[4926]: I1122 10:40:42.072336 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:42 crc kubenswrapper[4926]: I1122 10:40:42.072539 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:42 crc kubenswrapper[4926]: I1122 10:40:42.072689 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:42 crc kubenswrapper[4926]: I1122 10:40:42.072845 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:42Z","lastTransitionTime":"2025-11-22T10:40:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:42 crc kubenswrapper[4926]: I1122 10:40:42.176451 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:42 crc kubenswrapper[4926]: I1122 10:40:42.176498 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:42 crc kubenswrapper[4926]: I1122 10:40:42.176515 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:42 crc kubenswrapper[4926]: I1122 10:40:42.176537 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:42 crc kubenswrapper[4926]: I1122 10:40:42.176554 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:42Z","lastTransitionTime":"2025-11-22T10:40:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:42 crc kubenswrapper[4926]: I1122 10:40:42.279705 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:42 crc kubenswrapper[4926]: I1122 10:40:42.280260 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:42 crc kubenswrapper[4926]: I1122 10:40:42.280595 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:42 crc kubenswrapper[4926]: I1122 10:40:42.280878 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:42 crc kubenswrapper[4926]: I1122 10:40:42.281168 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:42Z","lastTransitionTime":"2025-11-22T10:40:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:40:42 crc kubenswrapper[4926]: I1122 10:40:42.385239 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:42 crc kubenswrapper[4926]: I1122 10:40:42.385605 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:42 crc kubenswrapper[4926]: I1122 10:40:42.385720 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:42 crc kubenswrapper[4926]: I1122 10:40:42.385815 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:42 crc kubenswrapper[4926]: I1122 10:40:42.385947 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:42Z","lastTransitionTime":"2025-11-22T10:40:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:42 crc kubenswrapper[4926]: I1122 10:40:42.488276 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:42 crc kubenswrapper[4926]: I1122 10:40:42.488327 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:42 crc kubenswrapper[4926]: I1122 10:40:42.488344 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:42 crc kubenswrapper[4926]: I1122 10:40:42.488370 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:42 crc kubenswrapper[4926]: I1122 10:40:42.488396 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:42Z","lastTransitionTime":"2025-11-22T10:40:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:42 crc kubenswrapper[4926]: I1122 10:40:42.580978 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-jfbf4" Nov 22 10:40:42 crc kubenswrapper[4926]: I1122 10:40:42.581777 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 22 10:40:42 crc kubenswrapper[4926]: E1122 10:40:42.581930 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 22 10:40:42 crc kubenswrapper[4926]: E1122 10:40:42.581705 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-jfbf4" podUID="c42b6f47-b1a4-4fee-8681-3b5288370323" Nov 22 10:40:42 crc kubenswrapper[4926]: I1122 10:40:42.582483 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 10:40:42 crc kubenswrapper[4926]: E1122 10:40:42.582744 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 22 10:40:42 crc kubenswrapper[4926]: I1122 10:40:42.591040 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:42 crc kubenswrapper[4926]: I1122 10:40:42.591373 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:42 crc kubenswrapper[4926]: I1122 10:40:42.591641 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:42 crc kubenswrapper[4926]: I1122 10:40:42.591987 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:42 crc kubenswrapper[4926]: I1122 10:40:42.592310 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:42Z","lastTransitionTime":"2025-11-22T10:40:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:40:42 crc kubenswrapper[4926]: I1122 10:40:42.696089 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:42 crc kubenswrapper[4926]: I1122 10:40:42.696635 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:42 crc kubenswrapper[4926]: I1122 10:40:42.696835 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:42 crc kubenswrapper[4926]: I1122 10:40:42.697100 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:42 crc kubenswrapper[4926]: I1122 10:40:42.697236 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:42Z","lastTransitionTime":"2025-11-22T10:40:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:42 crc kubenswrapper[4926]: I1122 10:40:42.800879 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:42 crc kubenswrapper[4926]: I1122 10:40:42.801017 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:42 crc kubenswrapper[4926]: I1122 10:40:42.801039 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:42 crc kubenswrapper[4926]: I1122 10:40:42.801066 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:42 crc kubenswrapper[4926]: I1122 10:40:42.801086 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:42Z","lastTransitionTime":"2025-11-22T10:40:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:42 crc kubenswrapper[4926]: I1122 10:40:42.908701 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:42 crc kubenswrapper[4926]: I1122 10:40:42.908758 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:42 crc kubenswrapper[4926]: I1122 10:40:42.908777 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:42 crc kubenswrapper[4926]: I1122 10:40:42.908799 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:42 crc kubenswrapper[4926]: I1122 10:40:42.908815 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:42Z","lastTransitionTime":"2025-11-22T10:40:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:40:43 crc kubenswrapper[4926]: I1122 10:40:43.012755 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:43 crc kubenswrapper[4926]: I1122 10:40:43.012929 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:43 crc kubenswrapper[4926]: I1122 10:40:43.013043 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:43 crc kubenswrapper[4926]: I1122 10:40:43.013128 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:43 crc kubenswrapper[4926]: I1122 10:40:43.013158 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:43Z","lastTransitionTime":"2025-11-22T10:40:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:43 crc kubenswrapper[4926]: I1122 10:40:43.116333 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:43 crc kubenswrapper[4926]: I1122 10:40:43.116391 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:43 crc kubenswrapper[4926]: I1122 10:40:43.116407 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:43 crc kubenswrapper[4926]: I1122 10:40:43.116430 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:43 crc kubenswrapper[4926]: I1122 10:40:43.116447 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:43Z","lastTransitionTime":"2025-11-22T10:40:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:43 crc kubenswrapper[4926]: I1122 10:40:43.219135 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:43 crc kubenswrapper[4926]: I1122 10:40:43.219213 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:43 crc kubenswrapper[4926]: I1122 10:40:43.219232 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:43 crc kubenswrapper[4926]: I1122 10:40:43.219257 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:43 crc kubenswrapper[4926]: I1122 10:40:43.219274 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:43Z","lastTransitionTime":"2025-11-22T10:40:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:40:43 crc kubenswrapper[4926]: I1122 10:40:43.322027 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:43 crc kubenswrapper[4926]: I1122 10:40:43.322075 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:43 crc kubenswrapper[4926]: I1122 10:40:43.322086 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:43 crc kubenswrapper[4926]: I1122 10:40:43.322105 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:43 crc kubenswrapper[4926]: I1122 10:40:43.322116 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:43Z","lastTransitionTime":"2025-11-22T10:40:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:43 crc kubenswrapper[4926]: I1122 10:40:43.425783 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:43 crc kubenswrapper[4926]: I1122 10:40:43.425847 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:43 crc kubenswrapper[4926]: I1122 10:40:43.425865 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:43 crc kubenswrapper[4926]: I1122 10:40:43.425915 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:43 crc kubenswrapper[4926]: I1122 10:40:43.425932 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:43Z","lastTransitionTime":"2025-11-22T10:40:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:43 crc kubenswrapper[4926]: I1122 10:40:43.529264 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:43 crc kubenswrapper[4926]: I1122 10:40:43.529318 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:43 crc kubenswrapper[4926]: I1122 10:40:43.529331 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:43 crc kubenswrapper[4926]: I1122 10:40:43.529349 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:43 crc kubenswrapper[4926]: I1122 10:40:43.529365 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:43Z","lastTransitionTime":"2025-11-22T10:40:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:40:43 crc kubenswrapper[4926]: I1122 10:40:43.580921 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 22 10:40:43 crc kubenswrapper[4926]: E1122 10:40:43.581194 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 22 10:40:43 crc kubenswrapper[4926]: I1122 10:40:43.632771 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:43 crc kubenswrapper[4926]: I1122 10:40:43.632834 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:43 crc kubenswrapper[4926]: I1122 10:40:43.632852 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:43 crc kubenswrapper[4926]: I1122 10:40:43.632875 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:43 crc kubenswrapper[4926]: I1122 10:40:43.632918 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:43Z","lastTransitionTime":"2025-11-22T10:40:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:43 crc kubenswrapper[4926]: I1122 10:40:43.735944 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:43 crc kubenswrapper[4926]: I1122 10:40:43.735990 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:43 crc kubenswrapper[4926]: I1122 10:40:43.736007 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:43 crc kubenswrapper[4926]: I1122 10:40:43.736028 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:43 crc kubenswrapper[4926]: I1122 10:40:43.736044 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:43Z","lastTransitionTime":"2025-11-22T10:40:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:40:43 crc kubenswrapper[4926]: I1122 10:40:43.838956 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:43 crc kubenswrapper[4926]: I1122 10:40:43.839004 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:43 crc kubenswrapper[4926]: I1122 10:40:43.839017 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:43 crc kubenswrapper[4926]: I1122 10:40:43.839034 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:43 crc kubenswrapper[4926]: I1122 10:40:43.839047 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:43Z","lastTransitionTime":"2025-11-22T10:40:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:43 crc kubenswrapper[4926]: I1122 10:40:43.942376 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:43 crc kubenswrapper[4926]: I1122 10:40:43.942460 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:43 crc kubenswrapper[4926]: I1122 10:40:43.942484 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:43 crc kubenswrapper[4926]: I1122 10:40:43.942511 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:43 crc kubenswrapper[4926]: I1122 10:40:43.942532 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:43Z","lastTransitionTime":"2025-11-22T10:40:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:44 crc kubenswrapper[4926]: I1122 10:40:44.045429 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:44 crc kubenswrapper[4926]: I1122 10:40:44.045506 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:44 crc kubenswrapper[4926]: I1122 10:40:44.045527 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:44 crc kubenswrapper[4926]: I1122 10:40:44.045553 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:44 crc kubenswrapper[4926]: I1122 10:40:44.045572 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:44Z","lastTransitionTime":"2025-11-22T10:40:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:40:44 crc kubenswrapper[4926]: I1122 10:40:44.148489 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:44 crc kubenswrapper[4926]: I1122 10:40:44.148549 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:44 crc kubenswrapper[4926]: I1122 10:40:44.148568 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:44 crc kubenswrapper[4926]: I1122 10:40:44.148591 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:44 crc kubenswrapper[4926]: I1122 10:40:44.148609 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:44Z","lastTransitionTime":"2025-11-22T10:40:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:44 crc kubenswrapper[4926]: I1122 10:40:44.251465 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:44 crc kubenswrapper[4926]: I1122 10:40:44.251533 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:44 crc kubenswrapper[4926]: I1122 10:40:44.251555 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:44 crc kubenswrapper[4926]: I1122 10:40:44.251584 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:44 crc kubenswrapper[4926]: I1122 10:40:44.251605 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:44Z","lastTransitionTime":"2025-11-22T10:40:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:44 crc kubenswrapper[4926]: I1122 10:40:44.355457 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:44 crc kubenswrapper[4926]: I1122 10:40:44.355502 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:44 crc kubenswrapper[4926]: I1122 10:40:44.355513 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:44 crc kubenswrapper[4926]: I1122 10:40:44.355530 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:44 crc kubenswrapper[4926]: I1122 10:40:44.355542 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:44Z","lastTransitionTime":"2025-11-22T10:40:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:40:44 crc kubenswrapper[4926]: I1122 10:40:44.457918 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:44 crc kubenswrapper[4926]: I1122 10:40:44.457961 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:44 crc kubenswrapper[4926]: I1122 10:40:44.457971 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:44 crc kubenswrapper[4926]: I1122 10:40:44.457988 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:44 crc kubenswrapper[4926]: I1122 10:40:44.458002 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:44Z","lastTransitionTime":"2025-11-22T10:40:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:44 crc kubenswrapper[4926]: I1122 10:40:44.560257 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:44 crc kubenswrapper[4926]: I1122 10:40:44.560292 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:44 crc kubenswrapper[4926]: I1122 10:40:44.560304 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:44 crc kubenswrapper[4926]: I1122 10:40:44.560319 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:44 crc kubenswrapper[4926]: I1122 10:40:44.560332 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:44Z","lastTransitionTime":"2025-11-22T10:40:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:44 crc kubenswrapper[4926]: I1122 10:40:44.581675 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-jfbf4" Nov 22 10:40:44 crc kubenswrapper[4926]: I1122 10:40:44.581704 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 22 10:40:44 crc kubenswrapper[4926]: I1122 10:40:44.581704 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 10:40:44 crc kubenswrapper[4926]: E1122 10:40:44.581940 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-jfbf4" podUID="c42b6f47-b1a4-4fee-8681-3b5288370323" Nov 22 10:40:44 crc kubenswrapper[4926]: E1122 10:40:44.582029 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 22 10:40:44 crc kubenswrapper[4926]: E1122 10:40:44.582140 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 22 10:40:44 crc kubenswrapper[4926]: I1122 10:40:44.664046 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:44 crc kubenswrapper[4926]: I1122 10:40:44.664117 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:44 crc kubenswrapper[4926]: I1122 10:40:44.664141 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:44 crc kubenswrapper[4926]: I1122 10:40:44.664170 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:44 crc kubenswrapper[4926]: I1122 10:40:44.664197 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:44Z","lastTransitionTime":"2025-11-22T10:40:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:44 crc kubenswrapper[4926]: I1122 10:40:44.766937 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:44 crc kubenswrapper[4926]: I1122 10:40:44.767390 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:44 crc kubenswrapper[4926]: I1122 10:40:44.767582 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:44 crc kubenswrapper[4926]: I1122 10:40:44.767772 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:44 crc kubenswrapper[4926]: I1122 10:40:44.768000 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:44Z","lastTransitionTime":"2025-11-22T10:40:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:40:44 crc kubenswrapper[4926]: I1122 10:40:44.871382 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:44 crc kubenswrapper[4926]: I1122 10:40:44.872299 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:44 crc kubenswrapper[4926]: I1122 10:40:44.872568 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:44 crc kubenswrapper[4926]: I1122 10:40:44.872772 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:44 crc kubenswrapper[4926]: I1122 10:40:44.872991 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:44Z","lastTransitionTime":"2025-11-22T10:40:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:44 crc kubenswrapper[4926]: I1122 10:40:44.975856 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:44 crc kubenswrapper[4926]: I1122 10:40:44.976373 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:44 crc kubenswrapper[4926]: I1122 10:40:44.976578 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:44 crc kubenswrapper[4926]: I1122 10:40:44.976769 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:44 crc kubenswrapper[4926]: I1122 10:40:44.977077 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:44Z","lastTransitionTime":"2025-11-22T10:40:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:45 crc kubenswrapper[4926]: I1122 10:40:45.079848 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:45 crc kubenswrapper[4926]: I1122 10:40:45.079899 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:45 crc kubenswrapper[4926]: I1122 10:40:45.079910 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:45 crc kubenswrapper[4926]: I1122 10:40:45.079925 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:45 crc kubenswrapper[4926]: I1122 10:40:45.079936 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:45Z","lastTransitionTime":"2025-11-22T10:40:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:40:45 crc kubenswrapper[4926]: I1122 10:40:45.182654 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:45 crc kubenswrapper[4926]: I1122 10:40:45.182690 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:45 crc kubenswrapper[4926]: I1122 10:40:45.182699 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:45 crc kubenswrapper[4926]: I1122 10:40:45.182714 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:45 crc kubenswrapper[4926]: I1122 10:40:45.182723 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:45Z","lastTransitionTime":"2025-11-22T10:40:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:45 crc kubenswrapper[4926]: I1122 10:40:45.284763 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:45 crc kubenswrapper[4926]: I1122 10:40:45.284812 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:45 crc kubenswrapper[4926]: I1122 10:40:45.284824 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:45 crc kubenswrapper[4926]: I1122 10:40:45.284845 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:45 crc kubenswrapper[4926]: I1122 10:40:45.284860 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:45Z","lastTransitionTime":"2025-11-22T10:40:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:45 crc kubenswrapper[4926]: I1122 10:40:45.386990 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:45 crc kubenswrapper[4926]: I1122 10:40:45.387025 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:45 crc kubenswrapper[4926]: I1122 10:40:45.387035 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:45 crc kubenswrapper[4926]: I1122 10:40:45.387050 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:45 crc kubenswrapper[4926]: I1122 10:40:45.387060 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:45Z","lastTransitionTime":"2025-11-22T10:40:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:40:45 crc kubenswrapper[4926]: I1122 10:40:45.490376 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:45 crc kubenswrapper[4926]: I1122 10:40:45.490425 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:45 crc kubenswrapper[4926]: I1122 10:40:45.490438 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:45 crc kubenswrapper[4926]: I1122 10:40:45.490456 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:45 crc kubenswrapper[4926]: I1122 10:40:45.490468 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:45Z","lastTransitionTime":"2025-11-22T10:40:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:45 crc kubenswrapper[4926]: I1122 10:40:45.581638 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 22 10:40:45 crc kubenswrapper[4926]: E1122 10:40:45.582084 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 22 10:40:45 crc kubenswrapper[4926]: I1122 10:40:45.582318 4926 scope.go:117] "RemoveContainer" containerID="f289e2c0a1949f60197796b92dd61a9192993575c83f759f2030ee0558635eaa" Nov 22 10:40:45 crc kubenswrapper[4926]: E1122 10:40:45.582604 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-z69nr_openshift-ovn-kubernetes(25bc94bb-a5d1-431c-9847-2f6a02997e25)\"" pod="openshift-ovn-kubernetes/ovnkube-node-z69nr" podUID="25bc94bb-a5d1-431c-9847-2f6a02997e25" Nov 22 10:40:45 crc kubenswrapper[4926]: I1122 10:40:45.593230 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:45 crc kubenswrapper[4926]: I1122 10:40:45.593287 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:45 crc kubenswrapper[4926]: I1122 10:40:45.593324 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:45 crc kubenswrapper[4926]: I1122 10:40:45.593356 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:45 crc kubenswrapper[4926]: I1122 10:40:45.593378 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:45Z","lastTransitionTime":"2025-11-22T10:40:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:45 crc kubenswrapper[4926]: I1122 10:40:45.696586 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:45 crc kubenswrapper[4926]: I1122 10:40:45.697018 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:45 crc kubenswrapper[4926]: I1122 10:40:45.697200 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:45 crc kubenswrapper[4926]: I1122 10:40:45.697343 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:45 crc kubenswrapper[4926]: I1122 10:40:45.697473 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:45Z","lastTransitionTime":"2025-11-22T10:40:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:40:45 crc kubenswrapper[4926]: I1122 10:40:45.800412 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:45 crc kubenswrapper[4926]: I1122 10:40:45.800693 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:45 crc kubenswrapper[4926]: I1122 10:40:45.800785 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:45 crc kubenswrapper[4926]: I1122 10:40:45.800879 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:45 crc kubenswrapper[4926]: I1122 10:40:45.801000 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:45Z","lastTransitionTime":"2025-11-22T10:40:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:45 crc kubenswrapper[4926]: I1122 10:40:45.903305 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:45 crc kubenswrapper[4926]: I1122 10:40:45.903343 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:45 crc kubenswrapper[4926]: I1122 10:40:45.903353 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:45 crc kubenswrapper[4926]: I1122 10:40:45.903371 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:45 crc kubenswrapper[4926]: I1122 10:40:45.903382 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:45Z","lastTransitionTime":"2025-11-22T10:40:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:46 crc kubenswrapper[4926]: I1122 10:40:46.006128 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:46 crc kubenswrapper[4926]: I1122 10:40:46.006162 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:46 crc kubenswrapper[4926]: I1122 10:40:46.006172 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:46 crc kubenswrapper[4926]: I1122 10:40:46.006186 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:46 crc kubenswrapper[4926]: I1122 10:40:46.006197 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:46Z","lastTransitionTime":"2025-11-22T10:40:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:40:46 crc kubenswrapper[4926]: I1122 10:40:46.108088 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:46 crc kubenswrapper[4926]: I1122 10:40:46.108130 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:46 crc kubenswrapper[4926]: I1122 10:40:46.108140 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:46 crc kubenswrapper[4926]: I1122 10:40:46.108154 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:46 crc kubenswrapper[4926]: I1122 10:40:46.108165 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:46Z","lastTransitionTime":"2025-11-22T10:40:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:46 crc kubenswrapper[4926]: I1122 10:40:46.210423 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:46 crc kubenswrapper[4926]: I1122 10:40:46.210466 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:46 crc kubenswrapper[4926]: I1122 10:40:46.210478 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:46 crc kubenswrapper[4926]: I1122 10:40:46.210495 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:46 crc kubenswrapper[4926]: I1122 10:40:46.210506 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:46Z","lastTransitionTime":"2025-11-22T10:40:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:46 crc kubenswrapper[4926]: I1122 10:40:46.291403 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/c42b6f47-b1a4-4fee-8681-3b5288370323-metrics-certs\") pod \"network-metrics-daemon-jfbf4\" (UID: \"c42b6f47-b1a4-4fee-8681-3b5288370323\") " pod="openshift-multus/network-metrics-daemon-jfbf4" Nov 22 10:40:46 crc kubenswrapper[4926]: E1122 10:40:46.291613 4926 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 22 10:40:46 crc kubenswrapper[4926]: E1122 10:40:46.291689 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/c42b6f47-b1a4-4fee-8681-3b5288370323-metrics-certs podName:c42b6f47-b1a4-4fee-8681-3b5288370323 nodeName:}" failed. No retries permitted until 2025-11-22 10:41:18.291662476 +0000 UTC m=+98.593267803 (durationBeforeRetry 32s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/c42b6f47-b1a4-4fee-8681-3b5288370323-metrics-certs") pod "network-metrics-daemon-jfbf4" (UID: "c42b6f47-b1a4-4fee-8681-3b5288370323") : object "openshift-multus"/"metrics-daemon-secret" not registered Nov 22 10:40:46 crc kubenswrapper[4926]: I1122 10:40:46.312581 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:46 crc kubenswrapper[4926]: I1122 10:40:46.312626 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:46 crc kubenswrapper[4926]: I1122 10:40:46.312642 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:46 crc kubenswrapper[4926]: I1122 10:40:46.312667 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:46 crc kubenswrapper[4926]: I1122 10:40:46.312684 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:46Z","lastTransitionTime":"2025-11-22T10:40:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:46 crc kubenswrapper[4926]: I1122 10:40:46.415228 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:46 crc kubenswrapper[4926]: I1122 10:40:46.415276 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:46 crc kubenswrapper[4926]: I1122 10:40:46.415292 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:46 crc kubenswrapper[4926]: I1122 10:40:46.415314 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:46 crc kubenswrapper[4926]: I1122 10:40:46.415334 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:46Z","lastTransitionTime":"2025-11-22T10:40:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:40:46 crc kubenswrapper[4926]: I1122 10:40:46.518294 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:46 crc kubenswrapper[4926]: I1122 10:40:46.518671 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:46 crc kubenswrapper[4926]: I1122 10:40:46.518814 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:46 crc kubenswrapper[4926]: I1122 10:40:46.519040 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:46 crc kubenswrapper[4926]: I1122 10:40:46.519219 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:46Z","lastTransitionTime":"2025-11-22T10:40:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:46 crc kubenswrapper[4926]: I1122 10:40:46.581184 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-jfbf4" Nov 22 10:40:46 crc kubenswrapper[4926]: I1122 10:40:46.581250 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 22 10:40:46 crc kubenswrapper[4926]: I1122 10:40:46.581305 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 10:40:46 crc kubenswrapper[4926]: E1122 10:40:46.581358 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-jfbf4" podUID="c42b6f47-b1a4-4fee-8681-3b5288370323" Nov 22 10:40:46 crc kubenswrapper[4926]: E1122 10:40:46.581417 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 22 10:40:46 crc kubenswrapper[4926]: E1122 10:40:46.581497 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 22 10:40:46 crc kubenswrapper[4926]: I1122 10:40:46.622858 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:46 crc kubenswrapper[4926]: I1122 10:40:46.622908 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:46 crc kubenswrapper[4926]: I1122 10:40:46.622917 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:46 crc kubenswrapper[4926]: I1122 10:40:46.622933 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:46 crc kubenswrapper[4926]: I1122 10:40:46.622944 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:46Z","lastTransitionTime":"2025-11-22T10:40:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:46 crc kubenswrapper[4926]: I1122 10:40:46.725019 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:46 crc kubenswrapper[4926]: I1122 10:40:46.725075 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:46 crc kubenswrapper[4926]: I1122 10:40:46.725086 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:46 crc kubenswrapper[4926]: I1122 10:40:46.725105 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:46 crc kubenswrapper[4926]: I1122 10:40:46.725119 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:46Z","lastTransitionTime":"2025-11-22T10:40:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:40:46 crc kubenswrapper[4926]: I1122 10:40:46.828456 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:46 crc kubenswrapper[4926]: I1122 10:40:46.828483 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:46 crc kubenswrapper[4926]: I1122 10:40:46.828491 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:46 crc kubenswrapper[4926]: I1122 10:40:46.828504 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:46 crc kubenswrapper[4926]: I1122 10:40:46.828514 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:46Z","lastTransitionTime":"2025-11-22T10:40:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:46 crc kubenswrapper[4926]: I1122 10:40:46.931427 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:46 crc kubenswrapper[4926]: I1122 10:40:46.931498 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:46 crc kubenswrapper[4926]: I1122 10:40:46.931517 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:46 crc kubenswrapper[4926]: I1122 10:40:46.931543 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:46 crc kubenswrapper[4926]: I1122 10:40:46.931562 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:46Z","lastTransitionTime":"2025-11-22T10:40:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:47 crc kubenswrapper[4926]: I1122 10:40:47.037644 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:47 crc kubenswrapper[4926]: I1122 10:40:47.037689 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:47 crc kubenswrapper[4926]: I1122 10:40:47.037696 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:47 crc kubenswrapper[4926]: I1122 10:40:47.037711 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:47 crc kubenswrapper[4926]: I1122 10:40:47.037721 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:47Z","lastTransitionTime":"2025-11-22T10:40:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:40:47 crc kubenswrapper[4926]: I1122 10:40:47.140414 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:47 crc kubenswrapper[4926]: I1122 10:40:47.140472 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:47 crc kubenswrapper[4926]: I1122 10:40:47.140489 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:47 crc kubenswrapper[4926]: I1122 10:40:47.140515 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:47 crc kubenswrapper[4926]: I1122 10:40:47.140532 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:47Z","lastTransitionTime":"2025-11-22T10:40:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:47 crc kubenswrapper[4926]: I1122 10:40:47.242191 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:47 crc kubenswrapper[4926]: I1122 10:40:47.242223 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:47 crc kubenswrapper[4926]: I1122 10:40:47.242237 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:47 crc kubenswrapper[4926]: I1122 10:40:47.242250 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:47 crc kubenswrapper[4926]: I1122 10:40:47.242259 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:47Z","lastTransitionTime":"2025-11-22T10:40:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:47 crc kubenswrapper[4926]: I1122 10:40:47.344565 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:47 crc kubenswrapper[4926]: I1122 10:40:47.344591 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:47 crc kubenswrapper[4926]: I1122 10:40:47.344598 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:47 crc kubenswrapper[4926]: I1122 10:40:47.344611 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:47 crc kubenswrapper[4926]: I1122 10:40:47.344619 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:47Z","lastTransitionTime":"2025-11-22T10:40:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:40:47 crc kubenswrapper[4926]: I1122 10:40:47.447112 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:47 crc kubenswrapper[4926]: I1122 10:40:47.447143 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:47 crc kubenswrapper[4926]: I1122 10:40:47.447154 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:47 crc kubenswrapper[4926]: I1122 10:40:47.447170 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:47 crc kubenswrapper[4926]: I1122 10:40:47.447182 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:47Z","lastTransitionTime":"2025-11-22T10:40:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:47 crc kubenswrapper[4926]: I1122 10:40:47.549479 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:47 crc kubenswrapper[4926]: I1122 10:40:47.549518 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:47 crc kubenswrapper[4926]: I1122 10:40:47.549529 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:47 crc kubenswrapper[4926]: I1122 10:40:47.549543 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:47 crc kubenswrapper[4926]: I1122 10:40:47.549554 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:47Z","lastTransitionTime":"2025-11-22T10:40:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:47 crc kubenswrapper[4926]: I1122 10:40:47.581255 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 22 10:40:47 crc kubenswrapper[4926]: E1122 10:40:47.581401 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 22 10:40:47 crc kubenswrapper[4926]: I1122 10:40:47.652221 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:47 crc kubenswrapper[4926]: I1122 10:40:47.652269 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:47 crc kubenswrapper[4926]: I1122 10:40:47.652277 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:47 crc kubenswrapper[4926]: I1122 10:40:47.652292 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:47 crc kubenswrapper[4926]: I1122 10:40:47.652302 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:47Z","lastTransitionTime":"2025-11-22T10:40:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:47 crc kubenswrapper[4926]: I1122 10:40:47.755413 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:47 crc kubenswrapper[4926]: I1122 10:40:47.755460 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:47 crc kubenswrapper[4926]: I1122 10:40:47.755469 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:47 crc kubenswrapper[4926]: I1122 10:40:47.755488 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:47 crc kubenswrapper[4926]: I1122 10:40:47.755497 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:47Z","lastTransitionTime":"2025-11-22T10:40:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:40:47 crc kubenswrapper[4926]: I1122 10:40:47.858848 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:47 crc kubenswrapper[4926]: I1122 10:40:47.858932 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:47 crc kubenswrapper[4926]: I1122 10:40:47.858945 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:47 crc kubenswrapper[4926]: I1122 10:40:47.858962 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:47 crc kubenswrapper[4926]: I1122 10:40:47.858976 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:47Z","lastTransitionTime":"2025-11-22T10:40:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:47 crc kubenswrapper[4926]: I1122 10:40:47.962712 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:47 crc kubenswrapper[4926]: I1122 10:40:47.962763 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:47 crc kubenswrapper[4926]: I1122 10:40:47.962781 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:47 crc kubenswrapper[4926]: I1122 10:40:47.962805 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:47 crc kubenswrapper[4926]: I1122 10:40:47.962822 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:47Z","lastTransitionTime":"2025-11-22T10:40:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:48 crc kubenswrapper[4926]: I1122 10:40:48.065252 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:48 crc kubenswrapper[4926]: I1122 10:40:48.065319 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:48 crc kubenswrapper[4926]: I1122 10:40:48.065339 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:48 crc kubenswrapper[4926]: I1122 10:40:48.065361 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:48 crc kubenswrapper[4926]: I1122 10:40:48.065377 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:48Z","lastTransitionTime":"2025-11-22T10:40:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:40:48 crc kubenswrapper[4926]: I1122 10:40:48.168526 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:48 crc kubenswrapper[4926]: I1122 10:40:48.168584 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:48 crc kubenswrapper[4926]: I1122 10:40:48.168603 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:48 crc kubenswrapper[4926]: I1122 10:40:48.168644 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:48 crc kubenswrapper[4926]: I1122 10:40:48.168678 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:48Z","lastTransitionTime":"2025-11-22T10:40:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:48 crc kubenswrapper[4926]: I1122 10:40:48.271454 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:48 crc kubenswrapper[4926]: I1122 10:40:48.271495 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:48 crc kubenswrapper[4926]: I1122 10:40:48.271505 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:48 crc kubenswrapper[4926]: I1122 10:40:48.271521 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:48 crc kubenswrapper[4926]: I1122 10:40:48.271531 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:48Z","lastTransitionTime":"2025-11-22T10:40:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:48 crc kubenswrapper[4926]: I1122 10:40:48.374590 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:48 crc kubenswrapper[4926]: I1122 10:40:48.374652 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:48 crc kubenswrapper[4926]: I1122 10:40:48.374668 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:48 crc kubenswrapper[4926]: I1122 10:40:48.374691 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:48 crc kubenswrapper[4926]: I1122 10:40:48.374707 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:48Z","lastTransitionTime":"2025-11-22T10:40:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:40:48 crc kubenswrapper[4926]: I1122 10:40:48.477236 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:48 crc kubenswrapper[4926]: I1122 10:40:48.477284 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:48 crc kubenswrapper[4926]: I1122 10:40:48.477298 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:48 crc kubenswrapper[4926]: I1122 10:40:48.477341 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:48 crc kubenswrapper[4926]: I1122 10:40:48.477357 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:48Z","lastTransitionTime":"2025-11-22T10:40:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:48 crc kubenswrapper[4926]: I1122 10:40:48.580267 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:48 crc kubenswrapper[4926]: I1122 10:40:48.580353 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:48 crc kubenswrapper[4926]: I1122 10:40:48.580371 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:48 crc kubenswrapper[4926]: I1122 10:40:48.580424 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:48 crc kubenswrapper[4926]: I1122 10:40:48.580441 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:48Z","lastTransitionTime":"2025-11-22T10:40:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:48 crc kubenswrapper[4926]: I1122 10:40:48.581075 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-jfbf4" Nov 22 10:40:48 crc kubenswrapper[4926]: I1122 10:40:48.581174 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 10:40:48 crc kubenswrapper[4926]: E1122 10:40:48.581254 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-jfbf4" podUID="c42b6f47-b1a4-4fee-8681-3b5288370323" Nov 22 10:40:48 crc kubenswrapper[4926]: I1122 10:40:48.581311 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 22 10:40:48 crc kubenswrapper[4926]: E1122 10:40:48.581380 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 22 10:40:48 crc kubenswrapper[4926]: E1122 10:40:48.581438 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 22 10:40:48 crc kubenswrapper[4926]: I1122 10:40:48.683113 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:48 crc kubenswrapper[4926]: I1122 10:40:48.683189 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:48 crc kubenswrapper[4926]: I1122 10:40:48.683215 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:48 crc kubenswrapper[4926]: I1122 10:40:48.683246 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:48 crc kubenswrapper[4926]: I1122 10:40:48.683272 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:48Z","lastTransitionTime":"2025-11-22T10:40:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:48 crc kubenswrapper[4926]: I1122 10:40:48.785638 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:48 crc kubenswrapper[4926]: I1122 10:40:48.785679 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:48 crc kubenswrapper[4926]: I1122 10:40:48.785690 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:48 crc kubenswrapper[4926]: I1122 10:40:48.785706 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:48 crc kubenswrapper[4926]: I1122 10:40:48.785718 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:48Z","lastTransitionTime":"2025-11-22T10:40:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:40:48 crc kubenswrapper[4926]: I1122 10:40:48.888727 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:48 crc kubenswrapper[4926]: I1122 10:40:48.888789 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:48 crc kubenswrapper[4926]: I1122 10:40:48.888802 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:48 crc kubenswrapper[4926]: I1122 10:40:48.888821 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:48 crc kubenswrapper[4926]: I1122 10:40:48.888832 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:48Z","lastTransitionTime":"2025-11-22T10:40:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:48 crc kubenswrapper[4926]: I1122 10:40:48.991567 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:48 crc kubenswrapper[4926]: I1122 10:40:48.991642 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:48 crc kubenswrapper[4926]: I1122 10:40:48.991656 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:48 crc kubenswrapper[4926]: I1122 10:40:48.991698 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:48 crc kubenswrapper[4926]: I1122 10:40:48.991712 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:48Z","lastTransitionTime":"2025-11-22T10:40:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Nov 22 10:40:49 crc kubenswrapper[4926]: I1122 10:40:49.045452 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-c6w2q_36de2843-6491-4c54-b624-c4a3d328c164/kube-multus/0.log"
Nov 22 10:40:49 crc kubenswrapper[4926]: I1122 10:40:49.045633 4926 generic.go:334] "Generic (PLEG): container finished" podID="36de2843-6491-4c54-b624-c4a3d328c164" containerID="13dff3bd18b5adc45fb92f8207a6726011752f430271dabbae9f2e94011f1b08" exitCode=1
Nov 22 10:40:49 crc kubenswrapper[4926]: I1122 10:40:49.045688 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-c6w2q" event={"ID":"36de2843-6491-4c54-b624-c4a3d328c164","Type":"ContainerDied","Data":"13dff3bd18b5adc45fb92f8207a6726011752f430271dabbae9f2e94011f1b08"}
Nov 22 10:40:49 crc kubenswrapper[4926]: I1122 10:40:49.046072 4926 scope.go:117] "RemoveContainer" containerID="13dff3bd18b5adc45fb92f8207a6726011752f430271dabbae9f2e94011f1b08"
Nov 22 10:40:49 crc kubenswrapper[4926]: I1122 10:40:49.068554 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1f3bdd4c-e5c5-46f1-8f92-450d892ef2e2\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b68f6c08d1ac65e51054d798dcb1e6affbf5f114ea3848b544d3093168e44150\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://adceb29cfdba4f70773eb38f81b2291080a8a25e6d6ffe7fbbb037118e0ac29f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://29acb771444281287208f7c5aecfe78ed6e70fe2940be31d423c011878bee6c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c37c77ac89f0469ff68b3d5dffc2af7d7b6997b8975abe244e302baf0f5bbacc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ed65ad0463b5aa72d13756891af20691bff705a94c67a55d45f71af2a88b01e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2f4c96d7281eda14ad3971ee27f9f0f401d12ba8b72e32b65bd9d4935182f980\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2f4c96d7281eda14ad3971ee27f9f0f401d12ba8b72e32b65bd9d4935182f980\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:39:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:39:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ebad6d17a869f5f58a7c43300ceace96025b58ca9621cb6bc23b45d69fe471b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ebad6d17a869f5f58a7c43300ceace96025b58ca9621cb6bc23b45d69fe471b4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:39:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:39:42Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://6075bb148352742b787b1e8df58fe76d06db89303a7c297d07446148532e92cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6075bb148352742b787b1e8df58fe76d06db89303a7c297d07446148532e92cb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:39:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:39:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:39:40Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:49Z is after 2025-08-24T17:21:41Z"
Nov 22 10:40:49 crc kubenswrapper[4926]: I1122 10:40:49.088841 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://20ee112074738980326d231b1680aa74c920ec797dbfe4752ebfdfb99695caeb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0a5e6c7d3f4dd4bbfdc58c50672c5187d32b136fb9e096c95e786193e1c773b9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:49Z is after 2025-08-24T17:21:41Z"
Nov 22 10:40:49 crc kubenswrapper[4926]: I1122 10:40:49.096255 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 10:40:49 crc kubenswrapper[4926]: I1122 10:40:49.096643 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 10:40:49 crc kubenswrapper[4926]: I1122 10:40:49.096667 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 10:40:49 crc kubenswrapper[4926]: I1122 10:40:49.096695 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 10:40:49 crc kubenswrapper[4926]: I1122 10:40:49.096718 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:49Z","lastTransitionTime":"2025-11-22T10:40:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 10:40:49 crc kubenswrapper[4926]: I1122 10:40:49.108477 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-z69nr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"25bc94bb-a5d1-431c-9847-2f6a02997e25\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://20393151c13ddc03e23c32092c70e09c99527d0dd7554c02fc9d2ef8f6e38ba4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://877601ea3a274df873ed3165ae9f9e407093527a55ca10bb9a1c6092312c1b58\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ecaafcef7f92f85be04341712725da108f6dc00c79d4fb72f683186ccaa7c7d7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f2e1c170bdf63c83df5049fbe46ef4de6e08838e961ed3c16904cb9572fdf52f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4dd9889bb8600e958757f70ba92e2e7742f9f4e8ce9bf9c11dc4a2fd4ba0aaa2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f647a7091a4d0acbf116e2583851b13ec4415004447202e892f2b24b7acf5e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f289e2c0a1949f60197796b92dd61a9192993575c83f759f2030ee0558635eaa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f289e2c0a1949f60197796b92dd61a9192993575c83f759f2030ee0558635eaa\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-22T10:40:29Z\\\",\\\"message\\\":\\\"8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1122 10:40:29.517517 6569 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1122 10:40:29.517528 6569 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1122 10:40:29.517565 6569 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1122 10:40:29.517564 6569 handler.go:208] Removed *v1.Node event handler 2\\\\nI1122 10:40:29.517583 6569 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1122 10:40:29.517597 6569 handler.go:208] Removed *v1.Node event handler 7\\\\nI1122 10:40:29.517615 6569 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1122 10:40:29.517621 6569 handler.go:190] Sending *v1.Pod event handler 3 for removal\\\\nI1122 10:40:29.517625 6569 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI1122 10:40:29.517631 6569 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1122 10:40:29.517641 6569 handler.go:208] Removed *v1.Pod event handler 6\\\\nI1122 10:40:29.517653 6569 handler.go:208] Removed *v1.Pod event handler 3\\\\nI1122 10:40:29.517930 6569 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI1122 10:40:29.517976 6569 factory.go:656] Stopping watch factory\\\\nI1122 10:40:29.517989 6569 ovnkube.go:599] Stopped ovnkube\\\\nI1122 1\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:28Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-z69nr_openshift-ovn-kubernetes(25bc94bb-a5d1-431c-9847-2f6a02997e25)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://19bceeb84e2cce918b75507fb0f9d5e4365fcd0d30da4491b83ed54b3c2d369a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fde011e559f073f261a07a3e55c05d88d1b11a16b6858b74cb8ef79b97c266b1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fde011e559f073f261a07a3e55c05d88d1b11a16b6858b74cb8ef79b97c266b1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:00Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-z69nr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:49Z is after 2025-08-24T17:21:41Z"
Nov 22 10:40:49 crc kubenswrapper[4926]: I1122 10:40:49.121423 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-cqsd2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"40323bfa-0937-4581-a5f5-bbfe1de066eb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ed82455ff9e2721a5688857be775b66c293c17713ae1ce362da8eca9a0c52c24\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w5t6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2335c0518813a704a36b683366f4b63c953afc0187416234b6682532298f80ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w5t6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:12Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-cqsd2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:49Z is after 2025-08-24T17:21:41Z"
Nov 22 10:40:49 crc kubenswrapper[4926]: I1122 10:40:49.134227 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://136423f9eeb872be0b685dcfee2a81e5f4133ad09607dc53fdfa64d70e87ceab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:49Z is after 2025-08-24T17:21:41Z"
Nov 22 10:40:49 crc kubenswrapper[4926]: I1122 10:40:49.146412 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-c6w2q" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"36de2843-6491-4c54-b624-c4a3d328c164\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:49Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:49Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://13dff3bd18b5adc45fb92f8207a6726011752f430271dabbae9f2e94011f1b08\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://13dff3bd18b5adc45fb92f8207a6726011752f430271dabbae9f2e94011f1b08\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-22T10:40:48Z\\\",\\\"message\\\":\\\"2025-11-22T10:40:02+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_9f86c420-ab6e-4737-b325-6c2a7026407f\\\\n2025-11-22T10:40:02+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_9f86c420-ab6e-4737-b325-6c2a7026407f to /host/opt/cni/bin/\\\\n2025-11-22T10:40:03Z [verbose] multus-daemon started\\\\n2025-11-22T10:40:03Z [verbose] Readiness Indicator file check\\\\n2025-11-22T10:40:48Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-52vzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:00Z\\\"}}\" for pod \"openshift-multus\"/\"multus-c6w2q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:49Z is after 2025-08-24T17:21:41Z"
Nov 22 10:40:49 crc kubenswrapper[4926]: I1122 10:40:49.159850 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"19e2ae79-41cf-4653-aa92-cc410145852b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5b4679473c2c11fecbd8496e1cbeb15b3a2e3fff5e5b5f127e0d1f1d7b3e5b70\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8066b411b165bb6f3aa9fac7c4f7d5687e58fba56ff2afd14c7a351ba3877483\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://96a716433f83f8041fc5f29383937a1b49d73701286739632a5c07a7e4a42077\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"k
ube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ff433d4efb9f77a47f79a39aa609b09a40a642f554e19b8fce0c84ad96a5e34\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:39:40Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:49Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:49 crc kubenswrapper[4926]: I1122 10:40:49.169301 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-wqf9b" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"385427b5-fb45-42dd-8ff8-a4c7cdad6157\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://06207f52a9095cf1830a6ad91952c7efa774aa15a2fca873dc09b6b995e26401\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4rrk8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:00Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-wqf9b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:49Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:49 crc kubenswrapper[4926]: I1122 10:40:49.180357 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2bff8ee1644ff7cd9bee279e5c641b5d9c55e0546fbc7d1f2330e4e86aee0c37\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:49Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:49 crc kubenswrapper[4926]: I1122 10:40:49.190987 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was 
deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:49Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:49 crc kubenswrapper[4926]: I1122 10:40:49.199370 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:49 crc kubenswrapper[4926]: I1122 10:40:49.199402 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:49 crc kubenswrapper[4926]: I1122 10:40:49.199412 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:49 crc kubenswrapper[4926]: I1122 10:40:49.199427 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:49 crc kubenswrapper[4926]: I1122 10:40:49.199436 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:49Z","lastTransitionTime":"2025-11-22T10:40:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:40:49 crc kubenswrapper[4926]: I1122 10:40:49.204526 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:49Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:49 crc kubenswrapper[4926]: I1122 10:40:49.213999 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-4sppr" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f66b576d-83a6-4919-a918-7b075f35881e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a4a164ad2484a9c0b09c8e6b174448dedfe84a017ada69b19429d90621540080\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jwtrf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:01Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-4sppr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:49Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:49 crc kubenswrapper[4926]: I1122 10:40:49.226374 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8bc26a84-5939-4708-97f7-36fb5b1d1f27\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bdc617bc3d316106b3e66df8521ae9165a90df081b92c4a6c167895006a667a4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8bdb9d01c359b36d5ea1517197ab1db0746267b371b5aaf6ae9af7157af6f76\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f92968d6a164ebcee515ead5d8704ab04430b65f29cec21c10cecf2b99079737\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://99cddd2a619c5ea76699afd981ea6b0ccab8df382e64f7f9b5399ac9091da32a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e4845e8b99f2def354e4a055d407495a578db4994aef6fb1c70b91760bbf4e2a\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"message\\\":\\\"le observer\\\\nW1122 10:40:01.221686 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1122 10:40:01.221835 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1122 10:40:01.223195 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3742639324/tls.crt::/tmp/serving-cert-3742639324/tls.key\\\\\\\"\\\\nI1122 10:40:01.845350 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1122 10:40:01.848213 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1122 10:40:01.848231 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1122 10:40:01.848251 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1122 10:40:01.848258 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1122 10:40:01.855383 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1122 10:40:01.855412 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1122 10:40:01.855418 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1122 10:40:01.855423 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nI1122 10:40:01.855441 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1122 10:40:01.855456 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1122 10:40:01.855461 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1122 10:40:01.855464 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1122 10:40:01.857125 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T10:39:55Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://59001d5969924d417da69f7b385e8ee904bb5289914c6f4a58156a80f2553abd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:43Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e4fdad471fe7d7332fa8c1fc1e4779b934b140f9c808e0af44f05808c56b3e42\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e4fdad471fe7d7332fa8c1fc1e4779b934b140f9c808e0af44f05808c56b3e42\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:39:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:39:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:39:40Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:49Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:49 crc kubenswrapper[4926]: I1122 10:40:49.237666 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"000da1af-72cf-48f8-a5f6-ba658a30f2f2\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cb848b9bd67cb4ffab830dcdca81d4ddcc4fdbdda65202dbfd4632f8fc0bdfc5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a34606ee4262c2a39b7e799d969d8a76024895265fba033ffdbbd036c9e80fe3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0b5507d01d4acb2f96c9c8b4978d67033fe6e9f01a035089649879219c4e9902\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6ec673ef3eadee493ad9112c2b920087ff03986cb55933071e6be31f987ff8b1\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6ec673ef3eadee493ad9112c2b920087ff03986cb55933071e6be31f987ff8b1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:39:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:39:41Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:39:40Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:49Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:49 crc kubenswrapper[4926]: I1122 10:40:49.248663 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:49Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:49 crc kubenswrapper[4926]: I1122 10:40:49.259878 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d4977b14-85c3-4141-9b15-1768f09e8d27\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7f4461b3d09c647d71476b2c089ab47e93fe0e0efb27f8179e476ff3667447f6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5nwn7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fb54559825bae40317f4dbc23c5b4238c4681f9845053656d59b2d2b3e504e1a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae3
4a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5nwn7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:00Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-xr9nd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:49Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:49 crc kubenswrapper[4926]: I1122 10:40:49.301571 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:49 crc kubenswrapper[4926]: I1122 10:40:49.301615 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:49 crc kubenswrapper[4926]: I1122 10:40:49.301626 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:49 crc kubenswrapper[4926]: I1122 10:40:49.301640 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:49 crc kubenswrapper[4926]: I1122 10:40:49.301649 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:49Z","lastTransitionTime":"2025-11-22T10:40:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:40:49 crc kubenswrapper[4926]: I1122 10:40:49.302588 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-sr572" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fa95257d-7464-4038-b2f3-aa795e4ac425\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b2dd6b2a2fe640f28f973df40cf7a31e3cba962a0d2aec796c6fcfa3892adce1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0a7d49a66e96b6f535cae7cd657acf64164c67b52df055b133183ef25644863d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0a7d49a66e96b6f535cae7cd657acf64164c67b52df055b133183ef25644863d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://53833aaa04a836cea1e6539fd089b91d92bec1e9c1037d036a09fa83a95b45da\
\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://53833aaa04a836cea1e6539fd089b91d92bec1e9c1037d036a09fa83a95b45da\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://80eeb7fb332eda9b9472092de548160297c4abdf76a929904fcf3321e992c230\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://80eeb7fb332eda9b9472092de548160297c4abdf76a929904fcf3321e992c230\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:40:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6293e4f193241ddbe2b4e93ceee670330e9e6cf86889fc9b049a7195da496a28\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6293e4f193241ddbe2b4e93ceee670330e9e6cf86889fc9b049a7195da496a28\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:40:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"
mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6ffd06bbdeee7c23dde48a03a975b5a86b258d8f95281abd78c8ce459155bb3f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6ffd06bbdeee7c23dde48a03a975b5a86b258d8f95281abd78c8ce459155bb3f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:40:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ff5dbca8ebeaee6449c5eadf548e32151cc59875e405a574c1f2e0677b40bb19\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ff5dbca8ebeaee6449c5eadf548e32151cc59875e405a574c1f2e0677b40bb19\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:40:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:00Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-sr572\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:49Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:49 crc kubenswrapper[4926]: I1122 10:40:49.325322 4926 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-multus/network-metrics-daemon-jfbf4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c42b6f47-b1a4-4fee-8681-3b5288370323\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:14Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:14Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:14Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cddxh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cddxh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:14Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-jfbf4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:49Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:49 crc kubenswrapper[4926]: I1122 10:40:49.404755 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:49 crc kubenswrapper[4926]: I1122 10:40:49.404810 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:49 crc kubenswrapper[4926]: I1122 10:40:49.404822 4926 
kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:49 crc kubenswrapper[4926]: I1122 10:40:49.404835 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:49 crc kubenswrapper[4926]: I1122 10:40:49.404844 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:49Z","lastTransitionTime":"2025-11-22T10:40:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:49 crc kubenswrapper[4926]: I1122 10:40:49.507048 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:49 crc kubenswrapper[4926]: I1122 10:40:49.507080 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:49 crc kubenswrapper[4926]: I1122 10:40:49.507089 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:49 crc kubenswrapper[4926]: I1122 10:40:49.507102 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:49 crc kubenswrapper[4926]: I1122 10:40:49.507112 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:49Z","lastTransitionTime":"2025-11-22T10:40:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:49 crc kubenswrapper[4926]: I1122 10:40:49.581066 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 22 10:40:49 crc kubenswrapper[4926]: E1122 10:40:49.581228 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 22 10:40:49 crc kubenswrapper[4926]: I1122 10:40:49.609690 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:49 crc kubenswrapper[4926]: I1122 10:40:49.609748 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:49 crc kubenswrapper[4926]: I1122 10:40:49.609766 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:49 crc kubenswrapper[4926]: I1122 10:40:49.609788 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:49 crc kubenswrapper[4926]: I1122 10:40:49.609806 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:49Z","lastTransitionTime":"2025-11-22T10:40:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:49 crc kubenswrapper[4926]: I1122 10:40:49.712707 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:49 crc kubenswrapper[4926]: I1122 10:40:49.712752 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:49 crc kubenswrapper[4926]: I1122 10:40:49.712764 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:49 crc kubenswrapper[4926]: I1122 10:40:49.712777 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:49 crc kubenswrapper[4926]: I1122 10:40:49.712788 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:49Z","lastTransitionTime":"2025-11-22T10:40:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:40:49 crc kubenswrapper[4926]: I1122 10:40:49.815837 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:49 crc kubenswrapper[4926]: I1122 10:40:49.815904 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:49 crc kubenswrapper[4926]: I1122 10:40:49.815917 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:49 crc kubenswrapper[4926]: I1122 10:40:49.815935 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:49 crc kubenswrapper[4926]: I1122 10:40:49.815948 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:49Z","lastTransitionTime":"2025-11-22T10:40:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:49 crc kubenswrapper[4926]: I1122 10:40:49.918474 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:49 crc kubenswrapper[4926]: I1122 10:40:49.918521 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:49 crc kubenswrapper[4926]: I1122 10:40:49.918536 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:49 crc kubenswrapper[4926]: I1122 10:40:49.918554 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:49 crc kubenswrapper[4926]: I1122 10:40:49.918567 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:49Z","lastTransitionTime":"2025-11-22T10:40:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:50 crc kubenswrapper[4926]: I1122 10:40:50.021774 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:50 crc kubenswrapper[4926]: I1122 10:40:50.021852 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:50 crc kubenswrapper[4926]: I1122 10:40:50.021879 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:50 crc kubenswrapper[4926]: I1122 10:40:50.021946 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:50 crc kubenswrapper[4926]: I1122 10:40:50.021963 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:50Z","lastTransitionTime":"2025-11-22T10:40:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:40:50 crc kubenswrapper[4926]: I1122 10:40:50.051789 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-c6w2q_36de2843-6491-4c54-b624-c4a3d328c164/kube-multus/0.log" Nov 22 10:40:50 crc kubenswrapper[4926]: I1122 10:40:50.051859 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-c6w2q" event={"ID":"36de2843-6491-4c54-b624-c4a3d328c164","Type":"ContainerStarted","Data":"954a6a1daad88733da68073d0f235894eb5931360545e07bc25ca2cbedf0efae"} Nov 22 10:40:50 crc kubenswrapper[4926]: I1122 10:40:50.070870 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1f3bdd4c-e5c5-46f1-8f92-450d892ef2e2\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b68f6c08d1ac65e51054d798dcb1e6affbf5f114ea3848b544d3093168e44150\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://adceb29cfdba4f70773eb38f81b2291080a8a25e6d6ffe7fbbb037118e0ac29f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://29acb771444281287208f7c5aecfe78ed6e70fe2940be31d423c011878bee6c4\\\",\\
\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c37c77ac89f0469ff68b3d5dffc2af7d7b6997b8975abe244e302baf0f5bbacc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ed65ad0463b5aa72d13756891af20691bff705a94c67a55d45f71af2a88b01e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2f4c96d7281eda14ad3971ee27f9f0f401d12ba8b72e32b65bd9d4935182f980\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2f4c96d7281eda14ad3971ee27f9f0f401d12ba8b72e32b65bd9d4935182f980\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:39:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:39:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ebad6d17a869f5f58a7c43300ceace96025b58ca9621cb6bc23b45d69fe471b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-ar
t-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ebad6d17a869f5f58a7c43300ceace96025b58ca9621cb6bc23b45d69fe471b4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:39:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:39:42Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://6075bb148352742b787b1e8df58fe76d06db89303a7c297d07446148532e92cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6075bb148352742b787b1e8df58fe76d06db89303a7c297d07446148532e92cb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:39:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:39:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:39:40Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:50Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:50 crc kubenswrapper[4926]: I1122 10:40:50.082751 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://20ee112074738980326d231b1680aa74c920ec797dbfe4752ebfdfb99695caeb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0a5e6c7d3f4dd4bbfdc58c50672c5187d32b136fb9e096c95e786193e1c773b9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:50Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:50 crc kubenswrapper[4926]: I1122 10:40:50.100699 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-z69nr" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"25bc94bb-a5d1-431c-9847-2f6a02997e25\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://20393151c13ddc03e23c32092c70e09c99527d0dd7554c02fc9d2ef8f6e38ba4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://877601ea3a274df873ed3165ae9f9e407093527a55ca10bb9a1c6092312c1b58\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ecaafcef7f92f85be04341712725da108f6dc00c79d4fb72f683186ccaa7c7d7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f2e1c170bdf63c83df5049fbe46ef4de6e08838e961ed3c16904cb9572fdf52f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4dd9889bb8600e958757f70ba92e2e7742f9f4e8ce9bf9c11dc4a2fd4ba0aaa2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f647a7091a4d0acbf116e2583851b13ec4415004447202e892f2b24b7acf5e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f289e2c0a1949f60197796b92dd61a9192993575c83f759f2030ee0558635eaa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f289e2c0a1949f60197796b92dd61a9192993575c83f759f2030ee0558635eaa\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-22T10:40:29Z\\\",\\\"message\\\":\\\"8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1122 10:40:29.517517 6569 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1122 10:40:29.517528 6569 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1122 10:40:29.517565 6569 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1122 10:40:29.517564 6569 handler.go:208] Removed *v1.Node event handler 2\\\\nI1122 10:40:29.517583 6569 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1122 10:40:29.517597 6569 handler.go:208] Removed *v1.Node event handler 7\\\\nI1122 10:40:29.517615 6569 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1122 10:40:29.517621 6569 handler.go:190] Sending *v1.Pod event handler 3 for removal\\\\nI1122 10:40:29.517625 6569 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI1122 10:40:29.517631 6569 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1122 10:40:29.517641 6569 handler.go:208] Removed *v1.Pod event handler 6\\\\nI1122 10:40:29.517653 6569 handler.go:208] Removed *v1.Pod event handler 3\\\\nI1122 10:40:29.517930 6569 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI1122 10:40:29.517976 6569 factory.go:656] Stopping watch factory\\\\nI1122 10:40:29.517989 6569 ovnkube.go:599] Stopped ovnkube\\\\nI1122 1\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:28Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-z69nr_openshift-ovn-kubernetes(25bc94bb-a5d1-431c-9847-2f6a02997e25)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://19bceeb84e2cce918b75507fb0f9d5e4365fcd0d30da4491b83ed54b3c2d369a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fde011e559f073f261a07a3e55c05d88d1b11a16b6858b74cb8ef79b97c266b1\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fde011e559f073f261a07a3e55c05d88d1b11a16b6858b74cb8ef79b97c266b1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:00Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-z69nr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:50Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:50 crc kubenswrapper[4926]: I1122 10:40:50.112722 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-cqsd2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"40323bfa-0937-4581-a5f5-bbfe1de066eb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ed82455ff9e2721a5688857be775b66c293c17713ae1ce362da8eca9a0c52c24\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w5t6w
\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2335c0518813a704a36b683366f4b63c953afc0187416234b6682532298f80ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w5t6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:12Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-cqsd2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:50Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:50 crc kubenswrapper[4926]: I1122 10:40:50.123485 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://136423f9eeb872be0b685dcfee2a81e5f4133ad09607dc53fdfa64d70e87ceab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:50Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:50 crc kubenswrapper[4926]: I1122 10:40:50.124562 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:50 crc kubenswrapper[4926]: I1122 10:40:50.124597 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:50 crc kubenswrapper[4926]: I1122 10:40:50.124607 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:50 crc kubenswrapper[4926]: I1122 10:40:50.124620 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:50 crc kubenswrapper[4926]: I1122 10:40:50.124630 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:50Z","lastTransitionTime":"2025-11-22T10:40:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:40:50 crc kubenswrapper[4926]: I1122 10:40:50.137472 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-c6w2q" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"36de2843-6491-4c54-b624-c4a3d328c164\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://954a6a1daad88733da68073d0f235894eb5931360545e07bc25ca2cbedf0efae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://13dff3bd18b5adc45fb92f8207a6726011752f430271dabbae9f2e94011f1b08\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-22T10:40:48Z\\\",\\\"message\\\":\\\"2025-11-22T10:40:02+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_9f86c420-ab6e-4737-b325-6c2a7026407f\\\\n2025-11-22T10:40:02+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_9f86c420-ab6e-4737-b325-6c2a7026407f to /host/opt/cni/bin/\\\\n2025-11-22T10:40:03Z [verbose] multus-daemon started\\\\n2025-11-22T10:40:03Z [verbose] Readiness Indicator file check\\\\n2025-11-22T10:40:48Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-52vzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:00Z\\\"}}\" for pod \"openshift-multus\"/\"multus-c6w2q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:50Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:50 crc kubenswrapper[4926]: I1122 10:40:50.149666 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"19e2ae79-41cf-4653-aa92-cc410145852b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5b4679473c2c11fecbd8496e1cbeb15b3a2e3fff5e5b5f127e0d1f1d7b3e5b70\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8066b411b165bb6f3aa9fac7c4f7d5687e58fba56ff2afd14c7a351ba3877483\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://96a716433f83f8041fc5f29383937a1b49d73701286739632a5c07a7e4a42077\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ff433d4efb9f77a47f79a39aa609b09a40a642f554e19b8fce0c84ad96a5e34\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:39:40Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:50Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:50 crc kubenswrapper[4926]: I1122 10:40:50.159933 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-wqf9b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"385427b5-fb45-42dd-8ff8-a4c7cdad6157\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://06207f52a9095cf1830a6ad91952c7efa774aa15a2fca873dc09b6b995e26401\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4rrk8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\"
:[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:00Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-wqf9b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:50Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:50 crc kubenswrapper[4926]: I1122 10:40:50.173116 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2bff8ee1644ff7cd9bee279e5c641b5d9c55e0546fbc7d1f2330e4e86aee0c37\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:50Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:50 crc kubenswrapper[4926]: I1122 10:40:50.186560 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:50Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:50 crc kubenswrapper[4926]: I1122 10:40:50.199340 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:50Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:50 crc kubenswrapper[4926]: I1122 10:40:50.209786 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-4sppr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f66b576d-83a6-4919-a918-7b075f35881e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a4a164ad2484a9c0b09c8e6b174448dedfe84a017ada69b19429d90621540080\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jwtrf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:01Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-4sppr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify 
certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:50Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:50 crc kubenswrapper[4926]: I1122 10:40:50.223797 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8bc26a84-5939-4708-97f7-36fb5b1d1f27\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bdc617bc3d316106b3e66df8521ae9165a90df081b92c4a6c167895006a667a4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8bdb9d01c359b36d5ea1517197ab1db0746267b371b5aaf6ae9af7157af6f76\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f92968d6a164ebcee515ead5d8704ab04430b65f29cec21c10cecf2b99079737\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernete
s/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://99cddd2a619c5ea76699afd981ea6b0ccab8df382e64f7f9b5399ac9091da32a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e4845e8b99f2def354e4a055d407495a578db4994aef6fb1c70b91760bbf4e2a\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"message\\\":\\\"le observer\\\\nW1122 10:40:01.221686 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1122 10:40:01.221835 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1122 10:40:01.223195 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3742639324/tls.crt::/tmp/serving-cert-3742639324/tls.key\\\\\\\"\\\\nI1122 10:40:01.845350 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1122 10:40:01.848213 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1122 10:40:01.848231 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1122 10:40:01.848251 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1122 10:40:01.848258 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1122 10:40:01.855383 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1122 10:40:01.855412 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1122 10:40:01.855418 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1122 10:40:01.855423 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nI1122 10:40:01.855441 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1122 10:40:01.855456 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1122 10:40:01.855461 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1122 10:40:01.855464 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1122 10:40:01.857125 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T10:39:55Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://59001d5969924d417da69f7b385e8ee904bb5289914c6f4a58156a80f2553abd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:43Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e4fdad471fe7d7332fa8c1fc1e4779b934b140f9c808e0af44f05808c56b3e42\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e4fdad471fe7d7332fa8c1fc1e4779b934b140f9c808e0af44f05808c56b3e42\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:39:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:39:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:39:40Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:50Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:50 crc kubenswrapper[4926]: I1122 10:40:50.226455 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:50 crc kubenswrapper[4926]: I1122 10:40:50.226486 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:50 crc kubenswrapper[4926]: I1122 10:40:50.226497 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:50 crc kubenswrapper[4926]: I1122 10:40:50.226514 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:50 crc kubenswrapper[4926]: I1122 10:40:50.226528 4926 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:50Z","lastTransitionTime":"2025-11-22T10:40:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:50 crc kubenswrapper[4926]: I1122 10:40:50.234819 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"000da1af-72cf-48f8-a5f6-ba658a30f2f2\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cb848b9bd67cb4ffab830dcdca81d4ddcc4fdbdda65202dbfd4632f8fc0bdfc5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a34606ee4262c2a39b7e799d969d8a76024895265fba033ffdbbd036c9e80fe3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0b5507d01d4acb2f96c9c8b4978d67033fe6e9f01a035089649879219c4e9902\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controlle
r\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6ec673ef3eadee493ad9112c2b920087ff03986cb55933071e6be31f987ff8b1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6ec673ef3eadee493ad9112c2b920087ff03986cb55933071e6be31f987ff8b1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:39:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:39:41Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:39:40Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:50Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:50 crc kubenswrapper[4926]: I1122 10:40:50.248567 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:50Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:50 crc kubenswrapper[4926]: I1122 10:40:50.259089 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d4977b14-85c3-4141-9b15-1768f09e8d27\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7f4461b3d09c647d71476b2c089ab47e93fe0e0efb27f8179e476ff3667447f6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5nwn7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fb54559825bae40317f4dbc23c5b4238c4681f9845053656d59b2d2b3e504e1a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae3
4a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5nwn7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:00Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-xr9nd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:50Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:50 crc kubenswrapper[4926]: I1122 10:40:50.275673 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-sr572" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fa95257d-7464-4038-b2f3-aa795e4ac425\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b2dd6b2a2fe640f28f973df40cf7a31e3cba962a0d2aec796c6fcfa3892adce1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0a7d49a66e96b6f535cae7cd657acf64164c67b52df055b133183ef25644863d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\"
:\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0a7d49a66e96b6f535cae7cd657acf64164c67b52df055b133183ef25644863d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://53833aaa04a836cea1e6539fd089b91d92bec1e9c1037d036a09fa83a95b45da\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://53833aaa04a836cea1e6539fd089b91d92bec1e9c1037d036a09fa83a95b45da\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://80eeb7fb332eda9b9472092de548160297c4abdf76a929904fcf3321e992c230\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://80eeb7fb332eda9b9472092de548160297c4abdf76a929904fcf3321e992c230\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:40:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\
"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6293e4f193241ddbe2b4e93ceee670330e9e6cf86889fc9b049a7195da496a28\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6293e4f193241ddbe2b4e93ceee670330e9e6cf86889fc9b049a7195da496a28\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:40:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6ffd06bbdeee7c23dde48a03a975b5a86b258d8f95281abd78c8ce459155bb3f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6ffd06bbdeee7c23dde48a03a975b5a86b258d8f95281abd78c8ce459155bb3f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:40:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ff5dbca8ebeaee6449c5eadf548e32151cc59875e405a574c1f2e0677b40bb19\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ff5dbca8ebeaee6449c5eadf548e32151cc59875e405a574c1f2e0677b40bb19\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:40:07Z
\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:00Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-sr572\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:50Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:50 crc kubenswrapper[4926]: I1122 10:40:50.284116 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-jfbf4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c42b6f47-b1a4-4fee-8681-3b5288370323\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:14Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:14Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:14Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cddxh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cddxh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:14Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-jfbf4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:50Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:50 crc kubenswrapper[4926]: I1122 10:40:50.329158 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:50 crc kubenswrapper[4926]: I1122 10:40:50.329195 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:50 crc kubenswrapper[4926]: I1122 10:40:50.329203 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:50 crc kubenswrapper[4926]: I1122 10:40:50.329217 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:50 crc kubenswrapper[4926]: I1122 10:40:50.329226 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:50Z","lastTransitionTime":"2025-11-22T10:40:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:40:50 crc kubenswrapper[4926]: I1122 10:40:50.431076 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:50 crc kubenswrapper[4926]: I1122 10:40:50.431112 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:50 crc kubenswrapper[4926]: I1122 10:40:50.431124 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:50 crc kubenswrapper[4926]: I1122 10:40:50.431139 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:50 crc kubenswrapper[4926]: I1122 10:40:50.431150 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:50Z","lastTransitionTime":"2025-11-22T10:40:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:50 crc kubenswrapper[4926]: I1122 10:40:50.533434 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:50 crc kubenswrapper[4926]: I1122 10:40:50.533504 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:50 crc kubenswrapper[4926]: I1122 10:40:50.533539 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:50 crc kubenswrapper[4926]: I1122 10:40:50.533575 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:50 crc kubenswrapper[4926]: I1122 10:40:50.533598 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:50Z","lastTransitionTime":"2025-11-22T10:40:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:50 crc kubenswrapper[4926]: I1122 10:40:50.581284 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 10:40:50 crc kubenswrapper[4926]: I1122 10:40:50.581333 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 22 10:40:50 crc kubenswrapper[4926]: E1122 10:40:50.581421 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 22 10:40:50 crc kubenswrapper[4926]: I1122 10:40:50.581454 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-jfbf4" Nov 22 10:40:50 crc kubenswrapper[4926]: E1122 10:40:50.581504 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 22 10:40:50 crc kubenswrapper[4926]: E1122 10:40:50.581619 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-jfbf4" podUID="c42b6f47-b1a4-4fee-8681-3b5288370323" Nov 22 10:40:50 crc kubenswrapper[4926]: I1122 10:40:50.595114 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://136423f9eeb872be0b685dcfee2a81e5f4133ad09607dc53fdfa64d70e87ceab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:50Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:50 crc kubenswrapper[4926]: I1122 10:40:50.607400 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-c6w2q" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"36de2843-6491-4c54-b624-c4a3d328c164\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://954a6a1daad88733da68073d0f235894eb5931360545e07bc25ca2cbedf0efae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://13dff3bd18b5adc45fb92f8207a6726011752f430271dabbae9f2e94011f1b08\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-22T10:40:48Z\\\",\\\"message\\\":\\\"2025-11-22T10:40:02+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_9f86c420-ab6e-4737-b325-6c2a7026407f\\\\n2025-11-22T10:40:02+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_9f86c420-ab6e-4737-b325-6c2a7026407f to /host/opt/cni/bin/\\\\n2025-11-22T10:40:03Z [verbose] multus-daemon started\\\\n2025-11-22T10:40:03Z [verbose] Readiness Indicator file check\\\\n2025-11-22T10:40:48Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-52vzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:00Z\\\"}}\" for pod \"openshift-multus\"/\"multus-c6w2q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:50Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:50 crc kubenswrapper[4926]: I1122 10:40:50.619992 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-4sppr" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f66b576d-83a6-4919-a918-7b075f35881e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a4a164ad2484a9c0b09c8e6b174448dedfe84a017ada69b19429d90621540080\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jwtrf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:01Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-4sppr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:50Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:50 crc kubenswrapper[4926]: I1122 10:40:50.633252 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"19e2ae79-41cf-4653-aa92-cc410145852b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5b4679473c2c11fecbd8496e1cbeb15b3a2e3fff5e5b5f127e0d1f1d7b3e5b70\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8066b411b165bb6f3aa9fac7c4f7d5687e58fba56ff2afd14c7a351ba3877483\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://96a716433f83f8041fc5f29383937a1b49d73701286739632a5c07a7e4a42077\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ff433d4efb9f77a47f79a39aa609b09a40a642f554e19b8fce0c84ad96a5e34\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:39:40Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:50Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:50 crc kubenswrapper[4926]: I1122 10:40:50.635961 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:50 crc kubenswrapper[4926]: I1122 10:40:50.636004 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:50 crc kubenswrapper[4926]: I1122 10:40:50.636021 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:50 crc kubenswrapper[4926]: I1122 10:40:50.636043 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:50 crc kubenswrapper[4926]: I1122 10:40:50.636058 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:50Z","lastTransitionTime":"2025-11-22T10:40:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:40:50 crc kubenswrapper[4926]: I1122 10:40:50.643555 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-wqf9b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"385427b5-fb45-42dd-8ff8-a4c7cdad6157\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://06207f52a9095cf1830a6ad91952c7efa774aa15a2fca873dc09b6b995e26401\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4rrk8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:00Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-wqf9b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:50Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:50 crc kubenswrapper[4926]: I1122 10:40:50.656123 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2bff8ee1644ff7cd9bee279e5c641b5d9c55e0546fbc7d1f2330e4e86aee0c37\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:50Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:50 crc kubenswrapper[4926]: I1122 10:40:50.668476 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was 
deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:50Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:50 crc kubenswrapper[4926]: I1122 10:40:50.689575 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:50Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:50 crc kubenswrapper[4926]: I1122 10:40:50.699392 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-jfbf4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c42b6f47-b1a4-4fee-8681-3b5288370323\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:14Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:14Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:14Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cddxh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cddxh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:14Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-jfbf4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:50Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:50 crc kubenswrapper[4926]: I1122 10:40:50.713614 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8bc26a84-5939-4708-97f7-36fb5b1d1f27\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bdc617bc3d316106b3e66df8521ae9165a90df081b92c4a6c167895006a667a4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8bdb9d01c359b36d5ea1517197ab1db0746267b371b5aaf6ae9af7157af6f76\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f92968d6a164ebcee515ead5d8704ab04430b65f29cec21c10cecf2b99079737\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://99cddd2a619c5ea76699afd981ea6b0ccab8df382e64f7f9b5399ac9091da32a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e4845e8b99f2def354e4a055d407495a578db4994aef6fb1c70b91760bbf4e2a\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"message\\\":\\\"le observer\\\\nW1122 10:40:01.221686 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1122 10:40:01.221835 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1122 10:40:01.223195 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3742639324/tls.crt::/tmp/serving-cert-3742639324/tls.key\\\\\\\"\\\\nI1122 10:40:01.845350 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1122 10:40:01.848213 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1122 10:40:01.848231 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1122 10:40:01.848251 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1122 10:40:01.848258 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1122 10:40:01.855383 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1122 10:40:01.855412 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1122 10:40:01.855418 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1122 10:40:01.855423 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nI1122 10:40:01.855441 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1122 10:40:01.855456 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1122 10:40:01.855461 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1122 10:40:01.855464 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1122 10:40:01.857125 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T10:39:55Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://59001d5969924d417da69f7b385e8ee904bb5289914c6f4a58156a80f2553abd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:43Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e4fdad471fe7d7332fa8c1fc1e4779b934b140f9c808e0af44f05808c56b3e42\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e4fdad471fe7d7332fa8c1fc1e4779b934b140f9c808e0af44f05808c56b3e42\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:39:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:39:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:39:40Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:50Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:50 crc kubenswrapper[4926]: I1122 10:40:50.725751 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"000da1af-72cf-48f8-a5f6-ba658a30f2f2\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cb848b9bd67cb4ffab830dcdca81d4ddcc4fdbdda65202dbfd4632f8fc0bdfc5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a34606ee4262c2a39b7e799d969d8a76024895265fba033ffdbbd036c9e80fe3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0b5507d01d4acb2f96c9c8b4978d67033fe6e9f01a035089649879219c4e9902\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6ec673ef3eadee493ad9112c2b920087ff03986cb55933071e6be31f987ff8b1\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6ec673ef3eadee493ad9112c2b920087ff03986cb55933071e6be31f987ff8b1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:39:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:39:41Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:39:40Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:50Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:50 crc kubenswrapper[4926]: I1122 10:40:50.738739 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:50 crc kubenswrapper[4926]: I1122 10:40:50.738770 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:50 crc kubenswrapper[4926]: I1122 10:40:50.738780 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:50 crc kubenswrapper[4926]: I1122 10:40:50.738795 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:50 crc kubenswrapper[4926]: I1122 10:40:50.738806 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:50Z","lastTransitionTime":"2025-11-22T10:40:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:40:50 crc kubenswrapper[4926]: I1122 10:40:50.738871 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:50Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:50 crc kubenswrapper[4926]: I1122 10:40:50.751962 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d4977b14-85c3-4141-9b15-1768f09e8d27\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7f4461b3d09c647d71476b2c089ab47e93fe0e0efb27f8179e476ff3667447f6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5nwn7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fb54559825bae40317f4dbc23c5b4238c4681f9845053656d59b2d2b3e504e1a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5nwn7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:00Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-xr9nd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:50Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:50 crc kubenswrapper[4926]: I1122 10:40:50.769051 4926 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-additional-cni-plugins-sr572" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fa95257d-7464-4038-b2f3-aa795e4ac425\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b2dd6b2a2fe640f28f973df40cf7a31e3cba962a0d2aec796c6fcfa3892adce1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0a7d49a66e96b6f535cae7cd657acf64164c67b52df055b133183ef25644863d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0a7d49a66e96b6f535cae7cd657acf64164c67b52df055b133183ef25644863d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://53833aaa04a836cea1e6539fd089b91d92bec1e9c1037d036a09fa83a95b45da\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2c
c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://53833aaa04a836cea1e6539fd089b91d92bec1e9c1037d036a09fa83a95b45da\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://80eeb7fb332eda9b9472092de548160297c4abdf76a929904fcf3321e992c230\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://80eeb7fb332eda9b9472092de548160297c4abdf76a929904fcf3321e992c230\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:40:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6293e4f193241ddbe2b4e93ceee670330e9e6cf86889fc9b049a7195da496a28\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6293e4f193241ddbe2b4e93ceee670330e9e6cf86889fc9b049a7195da496a28\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:40:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-re
lease\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6ffd06bbdeee7c23dde48a03a975b5a86b258d8f95281abd78c8ce459155bb3f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6ffd06bbdeee7c23dde48a03a975b5a86b258d8f95281abd78c8ce459155bb3f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:40:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ff5dbca8ebeaee6449c5eadf548e32151cc59875e405a574c1f2e0677b40bb19\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ff5dbca8ebeaee6449c5eadf548e32151cc59875e405a574c1f2e0677b40bb19\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:40:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:00Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-sr572\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:50Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:50 crc kubenswrapper[4926]: I1122 10:40:50.790279 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1f3bdd4c-e5c5-46f1-8f92-450d892ef2e2\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b68f6c08d1ac65e51054d798dcb1e6affbf5f114ea3848b544d3093168e44150\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://adceb29cfdba4f70773eb38f81b2291080a8a25e6d6ffe7fbbb037118e0ac29f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://29acb771444281287208f7c5aecfe78ed6e70fe2940be31d423c011878bee6c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c37c77ac89f0469ff68b3d5dffc2af7d7b6997b
8975abe244e302baf0f5bbacc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ed65ad0463b5aa72d13756891af20691bff705a94c67a55d45f71af2a88b01e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2f4c96d7281eda14ad3971ee27f9f0f401d12ba8b72e32b65bd9d4935182f980\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2f4c96d7281eda14ad3971ee27f9f0f401d12ba8b72e32b65bd9d4935182f980\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:39:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:39:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ebad6d17a869f5f58a7c43300ceace96025b58ca9621cb6bc23b45d69fe471b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ebad6d17a869f5f58a7c43300ceace96025b58ca9621cb6bc23b45d69fe471b4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:39:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:39:42Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://6075bb148352742b787b1e8df58fe76d06db89303a7c297d07446148532e92cb\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6075bb148352742b787b1e8df58fe76d06db89303a7c297d07446148532e92cb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:39:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:39:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:39:40Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:50Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:50 crc kubenswrapper[4926]: I1122 10:40:50.800875 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://20ee112074738980326d231b1680aa74c920ec797dbfe4752ebfdfb99695caeb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0a5e6c7d3f4dd4bbfdc58c50672c5187d32b136fb9e096c95e786193e1c773b9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36
cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:50Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:50 crc kubenswrapper[4926]: I1122 10:40:50.816176 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-z69nr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"25bc94bb-a5d1-431c-9847-2f6a02997e25\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://20393151c13ddc03e23c32092c70e09c99527d0dd7554c02fc9d2ef8f6e38ba4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://877601ea3a274df873ed3165ae9f9e407093527a55ca10bb9a1c6092312c1b58\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ecaafcef7f92f85be04341712725da108f6dc00c79d4fb72f683186ccaa7c7d7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f2e1c170bdf63c83df5049fbe46ef4de6e08838e961ed3c16904cb9572fdf52f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4dd9889bb8600e958757f70ba92e2e7742f9f4e8ce9bf9c11dc4a2fd4ba0aaa2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f647a7091a4d0acbf116e2583851b13ec4415004447202e892f2b24b7acf5e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f289e2c0a1949f60197796b92dd61a9192993575
c83f759f2030ee0558635eaa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f289e2c0a1949f60197796b92dd61a9192993575c83f759f2030ee0558635eaa\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-22T10:40:29Z\\\",\\\"message\\\":\\\"8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1122 10:40:29.517517 6569 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1122 10:40:29.517528 6569 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1122 10:40:29.517565 6569 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1122 10:40:29.517564 6569 handler.go:208] Removed *v1.Node event handler 2\\\\nI1122 10:40:29.517583 6569 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1122 10:40:29.517597 6569 handler.go:208] Removed *v1.Node event handler 7\\\\nI1122 10:40:29.517615 6569 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1122 10:40:29.517621 6569 handler.go:190] Sending *v1.Pod event handler 3 for removal\\\\nI1122 10:40:29.517625 6569 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI1122 10:40:29.517631 6569 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1122 10:40:29.517641 6569 handler.go:208] Removed *v1.Pod event handler 6\\\\nI1122 10:40:29.517653 6569 handler.go:208] Removed *v1.Pod event handler 3\\\\nI1122 10:40:29.517930 6569 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI1122 10:40:29.517976 6569 factory.go:656] Stopping watch factory\\\\nI1122 10:40:29.517989 6569 ovnkube.go:599] Stopped ovnkube\\\\nI1122 1\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:28Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-z69nr_openshift-ovn-kubernetes(25bc94bb-a5d1-431c-9847-2f6a02997e25)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://19bceeb84e2cce918b75507fb0f9d5e4365fcd0d30da4491b83ed54b3c2d369a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fde011e559f073f261a07a3e55c05d88d1b11a16b6858b74cb8ef79b97c266b1\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fde011e559f073f261a07a3e55c05d88d1b11a16b6858b74cb8ef79b97c266b1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:00Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-z69nr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:50Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:50 crc kubenswrapper[4926]: I1122 10:40:50.825955 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-cqsd2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"40323bfa-0937-4581-a5f5-bbfe1de066eb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ed82455ff9e2721a5688857be775b66c293c17713ae1ce362da8eca9a0c52c24\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w5t6w
\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2335c0518813a704a36b683366f4b63c953afc0187416234b6682532298f80ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w5t6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:12Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-cqsd2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:50Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:50 crc kubenswrapper[4926]: I1122 10:40:50.840728 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:50 crc kubenswrapper[4926]: I1122 10:40:50.840777 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:50 crc kubenswrapper[4926]: I1122 10:40:50.840790 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:50 crc kubenswrapper[4926]: I1122 10:40:50.840808 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:50 crc kubenswrapper[4926]: I1122 10:40:50.840820 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:50Z","lastTransitionTime":"2025-11-22T10:40:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:40:50 crc kubenswrapper[4926]: I1122 10:40:50.942664 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:50 crc kubenswrapper[4926]: I1122 10:40:50.942716 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:50 crc kubenswrapper[4926]: I1122 10:40:50.942728 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:50 crc kubenswrapper[4926]: I1122 10:40:50.942744 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:50 crc kubenswrapper[4926]: I1122 10:40:50.942755 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:50Z","lastTransitionTime":"2025-11-22T10:40:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:51 crc kubenswrapper[4926]: I1122 10:40:51.045064 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:51 crc kubenswrapper[4926]: I1122 10:40:51.045096 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:51 crc kubenswrapper[4926]: I1122 10:40:51.045106 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:51 crc kubenswrapper[4926]: I1122 10:40:51.045120 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:51 crc kubenswrapper[4926]: I1122 10:40:51.045131 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:51Z","lastTransitionTime":"2025-11-22T10:40:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:51 crc kubenswrapper[4926]: I1122 10:40:51.147613 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:51 crc kubenswrapper[4926]: I1122 10:40:51.147658 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:51 crc kubenswrapper[4926]: I1122 10:40:51.147671 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:51 crc kubenswrapper[4926]: I1122 10:40:51.147692 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:51 crc kubenswrapper[4926]: I1122 10:40:51.147703 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:51Z","lastTransitionTime":"2025-11-22T10:40:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:40:51 crc kubenswrapper[4926]: I1122 10:40:51.249636 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:51 crc kubenswrapper[4926]: I1122 10:40:51.249714 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:51 crc kubenswrapper[4926]: I1122 10:40:51.249747 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:51 crc kubenswrapper[4926]: I1122 10:40:51.249774 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:51 crc kubenswrapper[4926]: I1122 10:40:51.249790 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:51Z","lastTransitionTime":"2025-11-22T10:40:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:51 crc kubenswrapper[4926]: I1122 10:40:51.351419 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:51 crc kubenswrapper[4926]: I1122 10:40:51.351460 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:51 crc kubenswrapper[4926]: I1122 10:40:51.351471 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:51 crc kubenswrapper[4926]: I1122 10:40:51.351485 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:51 crc kubenswrapper[4926]: I1122 10:40:51.351496 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:51Z","lastTransitionTime":"2025-11-22T10:40:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:51 crc kubenswrapper[4926]: I1122 10:40:51.401128 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:51 crc kubenswrapper[4926]: I1122 10:40:51.401190 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:51 crc kubenswrapper[4926]: I1122 10:40:51.401212 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:51 crc kubenswrapper[4926]: I1122 10:40:51.401237 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:51 crc kubenswrapper[4926]: I1122 10:40:51.401252 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:51Z","lastTransitionTime":"2025-11-22T10:40:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:40:51 crc kubenswrapper[4926]: E1122 10:40:51.416173 4926 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404548Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865348Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:40:51Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:51Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:40:51Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:51Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:40:51Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:51Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:40:51Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:51Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"aeb560ef-1eb5-4732-98e3-250b723cbd1b\\\",\\\"systemUUID\\\":\\\"16dcb71b-1e1f-4d77-bb74-17aa213c9052\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:51Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:51 crc kubenswrapper[4926]: I1122 10:40:51.420509 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:51 crc kubenswrapper[4926]: I1122 10:40:51.420584 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 22 10:40:51 crc kubenswrapper[4926]: I1122 10:40:51.420602 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:51 crc kubenswrapper[4926]: I1122 10:40:51.420623 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:51 crc kubenswrapper[4926]: I1122 10:40:51.420639 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:51Z","lastTransitionTime":"2025-11-22T10:40:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:51 crc kubenswrapper[4926]: E1122 10:40:51.438242 4926 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404548Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865348Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:40:51Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:51Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:40:51Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:51Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:40:51Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:51Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:40:51Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:51Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"aeb560ef-1eb5-4732-98e3-250b723cbd1b\\\",\\\"systemUUID\\\":\\\"16dcb71b-1e1f-4d77-bb74-17aa213c9052\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:51Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:51 crc kubenswrapper[4926]: I1122 10:40:51.447318 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:51 crc kubenswrapper[4926]: I1122 10:40:51.447384 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 22 10:40:51 crc kubenswrapper[4926]: I1122 10:40:51.447410 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:51 crc kubenswrapper[4926]: I1122 10:40:51.447439 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:51 crc kubenswrapper[4926]: I1122 10:40:51.447460 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:51Z","lastTransitionTime":"2025-11-22T10:40:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:51 crc kubenswrapper[4926]: E1122 10:40:51.469278 4926 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404548Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865348Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:40:51Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:51Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:40:51Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:51Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:40:51Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:51Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:40:51Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:51Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"aeb560ef-1eb5-4732-98e3-250b723cbd1b\\\",\\\"systemUUID\\\":\\\"16dcb71b-1e1f-4d77-bb74-17aa213c9052\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:51Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:51 crc kubenswrapper[4926]: I1122 10:40:51.473082 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:51 crc kubenswrapper[4926]: I1122 10:40:51.473141 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 22 10:40:51 crc kubenswrapper[4926]: I1122 10:40:51.473163 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:51 crc kubenswrapper[4926]: I1122 10:40:51.473191 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:51 crc kubenswrapper[4926]: I1122 10:40:51.473225 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:51Z","lastTransitionTime":"2025-11-22T10:40:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:51 crc kubenswrapper[4926]: E1122 10:40:51.488766 4926 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404548Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865348Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:40:51Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:51Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:40:51Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:51Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:40:51Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:51Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:40:51Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:51Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"aeb560ef-1eb5-4732-98e3-250b723cbd1b\\\",\\\"systemUUID\\\":\\\"16dcb71b-1e1f-4d77-bb74-17aa213c9052\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:51Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:51 crc kubenswrapper[4926]: I1122 10:40:51.492141 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:51 crc kubenswrapper[4926]: I1122 10:40:51.492190 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 22 10:40:51 crc kubenswrapper[4926]: I1122 10:40:51.492207 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:51 crc kubenswrapper[4926]: I1122 10:40:51.492227 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:51 crc kubenswrapper[4926]: I1122 10:40:51.492243 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:51Z","lastTransitionTime":"2025-11-22T10:40:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:51 crc kubenswrapper[4926]: E1122 10:40:51.512415 4926 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404548Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865348Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:40:51Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:51Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:40:51Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:51Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:40:51Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:51Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:40:51Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:51Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"aeb560ef-1eb5-4732-98e3-250b723cbd1b\\\",\\\"systemUUID\\\":\\\"16dcb71b-1e1f-4d77-bb74-17aa213c9052\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:51Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:51 crc kubenswrapper[4926]: E1122 10:40:51.512877 4926 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 22 10:40:51 crc kubenswrapper[4926]: I1122 10:40:51.514710 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Nov 22 10:40:51 crc kubenswrapper[4926]: I1122 10:40:51.514819 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:51 crc kubenswrapper[4926]: I1122 10:40:51.514924 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:51 crc kubenswrapper[4926]: I1122 10:40:51.515015 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:51 crc kubenswrapper[4926]: I1122 10:40:51.515085 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:51Z","lastTransitionTime":"2025-11-22T10:40:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:51 crc kubenswrapper[4926]: I1122 10:40:51.581559 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 22 10:40:51 crc kubenswrapper[4926]: E1122 10:40:51.581925 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 22 10:40:51 crc kubenswrapper[4926]: I1122 10:40:51.617440 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:51 crc kubenswrapper[4926]: I1122 10:40:51.617709 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:51 crc kubenswrapper[4926]: I1122 10:40:51.617797 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:51 crc kubenswrapper[4926]: I1122 10:40:51.617921 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:51 crc kubenswrapper[4926]: I1122 10:40:51.618191 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:51Z","lastTransitionTime":"2025-11-22T10:40:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:40:51 crc kubenswrapper[4926]: I1122 10:40:51.721749 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:51 crc kubenswrapper[4926]: I1122 10:40:51.721800 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:51 crc kubenswrapper[4926]: I1122 10:40:51.721811 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:51 crc kubenswrapper[4926]: I1122 10:40:51.721829 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:51 crc kubenswrapper[4926]: I1122 10:40:51.721841 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:51Z","lastTransitionTime":"2025-11-22T10:40:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:51 crc kubenswrapper[4926]: I1122 10:40:51.823604 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:51 crc kubenswrapper[4926]: I1122 10:40:51.823915 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:51 crc kubenswrapper[4926]: I1122 10:40:51.824001 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:51 crc kubenswrapper[4926]: I1122 10:40:51.824091 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:51 crc kubenswrapper[4926]: I1122 10:40:51.824183 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:51Z","lastTransitionTime":"2025-11-22T10:40:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:51 crc kubenswrapper[4926]: I1122 10:40:51.927083 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:51 crc kubenswrapper[4926]: I1122 10:40:51.927151 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:51 crc kubenswrapper[4926]: I1122 10:40:51.927173 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:51 crc kubenswrapper[4926]: I1122 10:40:51.927198 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:51 crc kubenswrapper[4926]: I1122 10:40:51.927213 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:51Z","lastTransitionTime":"2025-11-22T10:40:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:40:52 crc kubenswrapper[4926]: I1122 10:40:52.030226 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:52 crc kubenswrapper[4926]: I1122 10:40:52.030641 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:52 crc kubenswrapper[4926]: I1122 10:40:52.030738 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:52 crc kubenswrapper[4926]: I1122 10:40:52.030868 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:52 crc kubenswrapper[4926]: I1122 10:40:52.031027 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:52Z","lastTransitionTime":"2025-11-22T10:40:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:52 crc kubenswrapper[4926]: I1122 10:40:52.134362 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:52 crc kubenswrapper[4926]: I1122 10:40:52.134402 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:52 crc kubenswrapper[4926]: I1122 10:40:52.134414 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:52 crc kubenswrapper[4926]: I1122 10:40:52.134432 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:52 crc kubenswrapper[4926]: I1122 10:40:52.134442 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:52Z","lastTransitionTime":"2025-11-22T10:40:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:52 crc kubenswrapper[4926]: I1122 10:40:52.237377 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:52 crc kubenswrapper[4926]: I1122 10:40:52.237503 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:52 crc kubenswrapper[4926]: I1122 10:40:52.237516 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:52 crc kubenswrapper[4926]: I1122 10:40:52.237531 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:52 crc kubenswrapper[4926]: I1122 10:40:52.237540 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:52Z","lastTransitionTime":"2025-11-22T10:40:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:40:52 crc kubenswrapper[4926]: I1122 10:40:52.340325 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:52 crc kubenswrapper[4926]: I1122 10:40:52.340375 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:52 crc kubenswrapper[4926]: I1122 10:40:52.340388 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:52 crc kubenswrapper[4926]: I1122 10:40:52.340407 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:52 crc kubenswrapper[4926]: I1122 10:40:52.340421 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:52Z","lastTransitionTime":"2025-11-22T10:40:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:52 crc kubenswrapper[4926]: I1122 10:40:52.443121 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:52 crc kubenswrapper[4926]: I1122 10:40:52.443158 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:52 crc kubenswrapper[4926]: I1122 10:40:52.443169 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:52 crc kubenswrapper[4926]: I1122 10:40:52.443185 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:52 crc kubenswrapper[4926]: I1122 10:40:52.443196 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:52Z","lastTransitionTime":"2025-11-22T10:40:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:52 crc kubenswrapper[4926]: I1122 10:40:52.546054 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:52 crc kubenswrapper[4926]: I1122 10:40:52.546486 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:52 crc kubenswrapper[4926]: I1122 10:40:52.546660 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:52 crc kubenswrapper[4926]: I1122 10:40:52.547032 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:52 crc kubenswrapper[4926]: I1122 10:40:52.547403 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:52Z","lastTransitionTime":"2025-11-22T10:40:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Nov 22 10:40:52 crc kubenswrapper[4926]: I1122 10:40:52.582030 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 22 10:40:52 crc kubenswrapper[4926]: E1122 10:40:52.582156 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 22 10:40:52 crc kubenswrapper[4926]: I1122 10:40:52.582372 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-jfbf4"
Nov 22 10:40:52 crc kubenswrapper[4926]: E1122 10:40:52.582453 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-jfbf4" podUID="c42b6f47-b1a4-4fee-8681-3b5288370323"
Nov 22 10:40:52 crc kubenswrapper[4926]: I1122 10:40:52.582672 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 22 10:40:52 crc kubenswrapper[4926]: E1122 10:40:52.582742 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 22 10:40:52 crc kubenswrapper[4926]: I1122 10:40:52.651982 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 10:40:52 crc kubenswrapper[4926]: I1122 10:40:52.652257 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 10:40:52 crc kubenswrapper[4926]: I1122 10:40:52.652341 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 10:40:52 crc kubenswrapper[4926]: I1122 10:40:52.652432 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 10:40:52 crc kubenswrapper[4926]: I1122 10:40:52.652522 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:52Z","lastTransitionTime":"2025-11-22T10:40:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 10:40:52 crc kubenswrapper[4926]: I1122 10:40:52.755597 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 10:40:52 crc kubenswrapper[4926]: I1122 10:40:52.756022 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 10:40:52 crc kubenswrapper[4926]: I1122 10:40:52.756237 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 10:40:52 crc kubenswrapper[4926]: I1122 10:40:52.756450 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 10:40:52 crc kubenswrapper[4926]: I1122 10:40:52.756666 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:52Z","lastTransitionTime":"2025-11-22T10:40:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 10:40:52 crc kubenswrapper[4926]: I1122 10:40:52.860163 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 10:40:52 crc kubenswrapper[4926]: I1122 10:40:52.861263 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 10:40:52 crc kubenswrapper[4926]: I1122 10:40:52.861410 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 10:40:52 crc kubenswrapper[4926]: I1122 10:40:52.861574 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 10:40:52 crc kubenswrapper[4926]: I1122 10:40:52.861658 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:52Z","lastTransitionTime":"2025-11-22T10:40:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 10:40:52 crc kubenswrapper[4926]: I1122 10:40:52.964429 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 10:40:52 crc kubenswrapper[4926]: I1122 10:40:52.964480 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 10:40:52 crc kubenswrapper[4926]: I1122 10:40:52.964498 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 10:40:52 crc kubenswrapper[4926]: I1122 10:40:52.964518 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 10:40:52 crc kubenswrapper[4926]: I1122 10:40:52.964534 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:52Z","lastTransitionTime":"2025-11-22T10:40:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 10:40:53 crc kubenswrapper[4926]: I1122 10:40:53.066162 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 10:40:53 crc kubenswrapper[4926]: I1122 10:40:53.066193 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 10:40:53 crc kubenswrapper[4926]: I1122 10:40:53.066201 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 10:40:53 crc kubenswrapper[4926]: I1122 10:40:53.066214 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 10:40:53 crc kubenswrapper[4926]: I1122 10:40:53.066222 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:53Z","lastTransitionTime":"2025-11-22T10:40:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 10:40:53 crc kubenswrapper[4926]: I1122 10:40:53.169873 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 10:40:53 crc kubenswrapper[4926]: I1122 10:40:53.169959 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 10:40:53 crc kubenswrapper[4926]: I1122 10:40:53.169978 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 10:40:53 crc kubenswrapper[4926]: I1122 10:40:53.170004 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 10:40:53 crc kubenswrapper[4926]: I1122 10:40:53.170025 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:53Z","lastTransitionTime":"2025-11-22T10:40:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 10:40:53 crc kubenswrapper[4926]: I1122 10:40:53.273766 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 10:40:53 crc kubenswrapper[4926]: I1122 10:40:53.274049 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 10:40:53 crc kubenswrapper[4926]: I1122 10:40:53.274156 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 10:40:53 crc kubenswrapper[4926]: I1122 10:40:53.274287 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 10:40:53 crc kubenswrapper[4926]: I1122 10:40:53.274414 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:53Z","lastTransitionTime":"2025-11-22T10:40:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 10:40:53 crc kubenswrapper[4926]: I1122 10:40:53.376998 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 10:40:53 crc kubenswrapper[4926]: I1122 10:40:53.377035 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 10:40:53 crc kubenswrapper[4926]: I1122 10:40:53.377044 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 10:40:53 crc kubenswrapper[4926]: I1122 10:40:53.377058 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 10:40:53 crc kubenswrapper[4926]: I1122 10:40:53.377066 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:53Z","lastTransitionTime":"2025-11-22T10:40:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 10:40:53 crc kubenswrapper[4926]: I1122 10:40:53.479188 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 10:40:53 crc kubenswrapper[4926]: I1122 10:40:53.479398 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 10:40:53 crc kubenswrapper[4926]: I1122 10:40:53.479491 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 10:40:53 crc kubenswrapper[4926]: I1122 10:40:53.479565 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 10:40:53 crc kubenswrapper[4926]: I1122 10:40:53.479622 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:53Z","lastTransitionTime":"2025-11-22T10:40:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 10:40:53 crc kubenswrapper[4926]: I1122 10:40:53.580950 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 22 10:40:53 crc kubenswrapper[4926]: E1122 10:40:53.581448 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 22 10:40:53 crc kubenswrapper[4926]: I1122 10:40:53.582481 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 10:40:53 crc kubenswrapper[4926]: I1122 10:40:53.582521 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 10:40:53 crc kubenswrapper[4926]: I1122 10:40:53.582535 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 10:40:53 crc kubenswrapper[4926]: I1122 10:40:53.582554 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 10:40:53 crc kubenswrapper[4926]: I1122 10:40:53.582566 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:53Z","lastTransitionTime":"2025-11-22T10:40:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 10:40:53 crc kubenswrapper[4926]: I1122 10:40:53.684620 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 10:40:53 crc kubenswrapper[4926]: I1122 10:40:53.684655 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 10:40:53 crc kubenswrapper[4926]: I1122 10:40:53.684666 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 10:40:53 crc kubenswrapper[4926]: I1122 10:40:53.684682 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 10:40:53 crc kubenswrapper[4926]: I1122 10:40:53.684703 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:53Z","lastTransitionTime":"2025-11-22T10:40:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 10:40:53 crc kubenswrapper[4926]: I1122 10:40:53.787341 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 10:40:53 crc kubenswrapper[4926]: I1122 10:40:53.787535 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 10:40:53 crc kubenswrapper[4926]: I1122 10:40:53.787642 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 10:40:53 crc kubenswrapper[4926]: I1122 10:40:53.787721 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 10:40:53 crc kubenswrapper[4926]: I1122 10:40:53.787900 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:53Z","lastTransitionTime":"2025-11-22T10:40:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 10:40:53 crc kubenswrapper[4926]: I1122 10:40:53.890093 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 10:40:53 crc kubenswrapper[4926]: I1122 10:40:53.890147 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 10:40:53 crc kubenswrapper[4926]: I1122 10:40:53.890165 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 10:40:53 crc kubenswrapper[4926]: I1122 10:40:53.890187 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 10:40:53 crc kubenswrapper[4926]: I1122 10:40:53.890204 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:53Z","lastTransitionTime":"2025-11-22T10:40:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 10:40:53 crc kubenswrapper[4926]: I1122 10:40:53.993520 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 10:40:53 crc kubenswrapper[4926]: I1122 10:40:53.993582 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 10:40:53 crc kubenswrapper[4926]: I1122 10:40:53.993599 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 10:40:53 crc kubenswrapper[4926]: I1122 10:40:53.993622 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 10:40:53 crc kubenswrapper[4926]: I1122 10:40:53.993638 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:53Z","lastTransitionTime":"2025-11-22T10:40:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 10:40:54 crc kubenswrapper[4926]: I1122 10:40:54.095462 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 10:40:54 crc kubenswrapper[4926]: I1122 10:40:54.095493 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 10:40:54 crc kubenswrapper[4926]: I1122 10:40:54.095503 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 10:40:54 crc kubenswrapper[4926]: I1122 10:40:54.095518 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 10:40:54 crc kubenswrapper[4926]: I1122 10:40:54.095527 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:54Z","lastTransitionTime":"2025-11-22T10:40:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 10:40:54 crc kubenswrapper[4926]: I1122 10:40:54.198134 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 10:40:54 crc kubenswrapper[4926]: I1122 10:40:54.198688 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 10:40:54 crc kubenswrapper[4926]: I1122 10:40:54.198774 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 10:40:54 crc kubenswrapper[4926]: I1122 10:40:54.198867 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 10:40:54 crc kubenswrapper[4926]: I1122 10:40:54.198975 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:54Z","lastTransitionTime":"2025-11-22T10:40:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 10:40:54 crc kubenswrapper[4926]: I1122 10:40:54.301840 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 10:40:54 crc kubenswrapper[4926]: I1122 10:40:54.302089 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 10:40:54 crc kubenswrapper[4926]: I1122 10:40:54.302159 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 10:40:54 crc kubenswrapper[4926]: I1122 10:40:54.302225 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 10:40:54 crc kubenswrapper[4926]: I1122 10:40:54.302283 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:54Z","lastTransitionTime":"2025-11-22T10:40:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 10:40:54 crc kubenswrapper[4926]: I1122 10:40:54.405772 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 10:40:54 crc kubenswrapper[4926]: I1122 10:40:54.405837 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 10:40:54 crc kubenswrapper[4926]: I1122 10:40:54.405858 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 10:40:54 crc kubenswrapper[4926]: I1122 10:40:54.405921 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 10:40:54 crc kubenswrapper[4926]: I1122 10:40:54.405949 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:54Z","lastTransitionTime":"2025-11-22T10:40:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 10:40:54 crc kubenswrapper[4926]: I1122 10:40:54.509567 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 10:40:54 crc kubenswrapper[4926]: I1122 10:40:54.509648 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 10:40:54 crc kubenswrapper[4926]: I1122 10:40:54.509673 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 10:40:54 crc kubenswrapper[4926]: I1122 10:40:54.509702 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 10:40:54 crc kubenswrapper[4926]: I1122 10:40:54.509725 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:54Z","lastTransitionTime":"2025-11-22T10:40:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 10:40:54 crc kubenswrapper[4926]: I1122 10:40:54.581327 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-jfbf4"
Nov 22 10:40:54 crc kubenswrapper[4926]: I1122 10:40:54.581468 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 22 10:40:54 crc kubenswrapper[4926]: E1122 10:40:54.581547 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-jfbf4" podUID="c42b6f47-b1a4-4fee-8681-3b5288370323"
Nov 22 10:40:54 crc kubenswrapper[4926]: E1122 10:40:54.581646 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 22 10:40:54 crc kubenswrapper[4926]: I1122 10:40:54.581692 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 22 10:40:54 crc kubenswrapper[4926]: E1122 10:40:54.581855 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 22 10:40:54 crc kubenswrapper[4926]: I1122 10:40:54.612349 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 10:40:54 crc kubenswrapper[4926]: I1122 10:40:54.612532 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 10:40:54 crc kubenswrapper[4926]: I1122 10:40:54.612589 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 10:40:54 crc kubenswrapper[4926]: I1122 10:40:54.612614 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 10:40:54 crc kubenswrapper[4926]: I1122 10:40:54.612631 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:54Z","lastTransitionTime":"2025-11-22T10:40:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 10:40:54 crc kubenswrapper[4926]: I1122 10:40:54.715646 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 10:40:54 crc kubenswrapper[4926]: I1122 10:40:54.715721 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 10:40:54 crc kubenswrapper[4926]: I1122 10:40:54.715745 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 10:40:54 crc kubenswrapper[4926]: I1122 10:40:54.715774 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 10:40:54 crc kubenswrapper[4926]: I1122 10:40:54.715796 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:54Z","lastTransitionTime":"2025-11-22T10:40:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 10:40:54 crc kubenswrapper[4926]: I1122 10:40:54.818423 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 10:40:54 crc kubenswrapper[4926]: I1122 10:40:54.818477 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 10:40:54 crc kubenswrapper[4926]: I1122 10:40:54.818495 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 10:40:54 crc kubenswrapper[4926]: I1122 10:40:54.818520 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 10:40:54 crc kubenswrapper[4926]: I1122 10:40:54.818539 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:54Z","lastTransitionTime":"2025-11-22T10:40:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 10:40:54 crc kubenswrapper[4926]: I1122 10:40:54.921344 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 10:40:54 crc kubenswrapper[4926]: I1122 10:40:54.921413 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 10:40:54 crc kubenswrapper[4926]: I1122 10:40:54.921437 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 10:40:54 crc kubenswrapper[4926]: I1122 10:40:54.921463 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 10:40:54 crc kubenswrapper[4926]: I1122 10:40:54.921484 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:54Z","lastTransitionTime":"2025-11-22T10:40:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 10:40:55 crc kubenswrapper[4926]: I1122 10:40:55.025722 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 10:40:55 crc kubenswrapper[4926]: I1122 10:40:55.025760 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 10:40:55 crc kubenswrapper[4926]: I1122 10:40:55.025771 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 10:40:55 crc kubenswrapper[4926]: I1122 10:40:55.025785 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 10:40:55 crc kubenswrapper[4926]: I1122 10:40:55.025796 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:55Z","lastTransitionTime":"2025-11-22T10:40:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 10:40:55 crc kubenswrapper[4926]: I1122 10:40:55.128625 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 10:40:55 crc kubenswrapper[4926]: I1122 10:40:55.128686 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 10:40:55 crc kubenswrapper[4926]: I1122 10:40:55.128701 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 10:40:55 crc kubenswrapper[4926]: I1122 10:40:55.128728 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 10:40:55 crc kubenswrapper[4926]: I1122 10:40:55.128744 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:55Z","lastTransitionTime":"2025-11-22T10:40:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 10:40:55 crc kubenswrapper[4926]: I1122 10:40:55.231127 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 10:40:55 crc kubenswrapper[4926]: I1122 10:40:55.231187 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 10:40:55 crc kubenswrapper[4926]: I1122 10:40:55.231203 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 10:40:55 crc kubenswrapper[4926]: I1122 10:40:55.231226 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 10:40:55 crc kubenswrapper[4926]: I1122 10:40:55.231245 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:55Z","lastTransitionTime":"2025-11-22T10:40:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 10:40:55 crc kubenswrapper[4926]: I1122 10:40:55.334630 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 10:40:55 crc kubenswrapper[4926]: I1122 10:40:55.334697 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 10:40:55 crc kubenswrapper[4926]: I1122 10:40:55.334713 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 10:40:55 crc kubenswrapper[4926]: I1122 10:40:55.334736 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 10:40:55 crc kubenswrapper[4926]: I1122 10:40:55.334754 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:55Z","lastTransitionTime":"2025-11-22T10:40:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 10:40:55 crc kubenswrapper[4926]: I1122 10:40:55.437728 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 10:40:55 crc kubenswrapper[4926]: I1122 10:40:55.438186 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 10:40:55 crc kubenswrapper[4926]: I1122 10:40:55.438397 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 10:40:55 crc kubenswrapper[4926]: I1122 10:40:55.438655 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 10:40:55 crc kubenswrapper[4926]: I1122 10:40:55.438829 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:55Z","lastTransitionTime":"2025-11-22T10:40:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 10:40:55 crc kubenswrapper[4926]: I1122 10:40:55.541740 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 10:40:55 crc kubenswrapper[4926]: I1122 10:40:55.541785 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 10:40:55 crc kubenswrapper[4926]: I1122 10:40:55.541796 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 10:40:55 crc kubenswrapper[4926]: I1122 10:40:55.541810 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 10:40:55 crc kubenswrapper[4926]: I1122 10:40:55.541818 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:55Z","lastTransitionTime":"2025-11-22T10:40:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 10:40:55 crc kubenswrapper[4926]: I1122 10:40:55.581435 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 22 10:40:55 crc kubenswrapper[4926]: E1122 10:40:55.581639 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 22 10:40:55 crc kubenswrapper[4926]: I1122 10:40:55.645345 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 10:40:55 crc kubenswrapper[4926]: I1122 10:40:55.645394 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 10:40:55 crc kubenswrapper[4926]: I1122 10:40:55.645406 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 10:40:55 crc kubenswrapper[4926]: I1122 10:40:55.645423 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 10:40:55 crc kubenswrapper[4926]: I1122 10:40:55.645435 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:55Z","lastTransitionTime":"2025-11-22T10:40:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 10:40:55 crc kubenswrapper[4926]: I1122 10:40:55.749303 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 10:40:55 crc kubenswrapper[4926]: I1122 10:40:55.749622 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 10:40:55 crc kubenswrapper[4926]: I1122 10:40:55.749761 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 10:40:55 crc kubenswrapper[4926]: I1122 10:40:55.749919 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 10:40:55 crc kubenswrapper[4926]: I1122 10:40:55.750066 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:55Z","lastTransitionTime":"2025-11-22T10:40:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 10:40:55 crc kubenswrapper[4926]: I1122 10:40:55.853503 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 10:40:55 crc kubenswrapper[4926]: I1122 10:40:55.853558 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 10:40:55 crc kubenswrapper[4926]: I1122 10:40:55.853568 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 10:40:55 crc kubenswrapper[4926]: I1122 10:40:55.853582 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 10:40:55 crc kubenswrapper[4926]: I1122 10:40:55.853592 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:55Z","lastTransitionTime":"2025-11-22T10:40:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 10:40:55 crc kubenswrapper[4926]: I1122 10:40:55.956995 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 10:40:55 crc kubenswrapper[4926]: I1122 10:40:55.957037 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 10:40:55 crc kubenswrapper[4926]: I1122 10:40:55.957047 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 10:40:55 crc kubenswrapper[4926]: I1122 10:40:55.957062 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 10:40:55 crc kubenswrapper[4926]: I1122 10:40:55.957076 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:55Z","lastTransitionTime":"2025-11-22T10:40:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 10:40:56 crc kubenswrapper[4926]: I1122 10:40:56.059630 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 10:40:56 crc kubenswrapper[4926]: I1122 10:40:56.059680 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 10:40:56 crc kubenswrapper[4926]: I1122 10:40:56.059694 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 10:40:56 crc kubenswrapper[4926]: I1122 10:40:56.059713 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 10:40:56 crc kubenswrapper[4926]: I1122 10:40:56.059727 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:56Z","lastTransitionTime":"2025-11-22T10:40:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 10:40:56 crc kubenswrapper[4926]: I1122 10:40:56.162580 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 10:40:56 crc kubenswrapper[4926]: I1122 10:40:56.162675 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 10:40:56 crc kubenswrapper[4926]: I1122 10:40:56.162695 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 10:40:56 crc kubenswrapper[4926]: I1122 10:40:56.162719 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 10:40:56 crc kubenswrapper[4926]: I1122 10:40:56.162736 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:56Z","lastTransitionTime":"2025-11-22T10:40:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 10:40:56 crc kubenswrapper[4926]: I1122 10:40:56.266077 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 10:40:56 crc kubenswrapper[4926]: I1122 10:40:56.266121 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 10:40:56 crc kubenswrapper[4926]: I1122 10:40:56.266140 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 10:40:56 crc kubenswrapper[4926]: I1122 10:40:56.266163 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 10:40:56 crc kubenswrapper[4926]: I1122 10:40:56.266181 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:56Z","lastTransitionTime":"2025-11-22T10:40:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 10:40:56 crc kubenswrapper[4926]: I1122 10:40:56.369818 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 10:40:56 crc kubenswrapper[4926]: I1122 10:40:56.369927 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 10:40:56 crc kubenswrapper[4926]: I1122 10:40:56.369945 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 10:40:56 crc kubenswrapper[4926]: I1122 10:40:56.369967 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 10:40:56 crc kubenswrapper[4926]: I1122 10:40:56.369985 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:56Z","lastTransitionTime":"2025-11-22T10:40:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 10:40:56 crc kubenswrapper[4926]: I1122 10:40:56.472995 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 10:40:56 crc kubenswrapper[4926]: I1122 10:40:56.473379 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 10:40:56 crc kubenswrapper[4926]: I1122 10:40:56.473644 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 10:40:56 crc kubenswrapper[4926]: I1122 10:40:56.473842 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 10:40:56 crc kubenswrapper[4926]: I1122 10:40:56.474074 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:56Z","lastTransitionTime":"2025-11-22T10:40:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 10:40:56 crc kubenswrapper[4926]: I1122 10:40:56.577196 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 10:40:56 crc kubenswrapper[4926]: I1122 10:40:56.577294 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 10:40:56 crc kubenswrapper[4926]: I1122 10:40:56.577329 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 10:40:56 crc kubenswrapper[4926]: I1122 10:40:56.577367 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 10:40:56 crc kubenswrapper[4926]: I1122 10:40:56.577399 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:56Z","lastTransitionTime":"2025-11-22T10:40:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 10:40:56 crc kubenswrapper[4926]: I1122 10:40:56.581328 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-jfbf4"
Nov 22 10:40:56 crc kubenswrapper[4926]: E1122 10:40:56.581558 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-jfbf4" podUID="c42b6f47-b1a4-4fee-8681-3b5288370323"
Nov 22 10:40:56 crc kubenswrapper[4926]: I1122 10:40:56.581815 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 22 10:40:56 crc kubenswrapper[4926]: I1122 10:40:56.581940 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 22 10:40:56 crc kubenswrapper[4926]: E1122 10:40:56.582696 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 22 10:40:56 crc kubenswrapper[4926]: E1122 10:40:56.583104 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 22 10:40:56 crc kubenswrapper[4926]: I1122 10:40:56.583404 4926 scope.go:117] "RemoveContainer" containerID="f289e2c0a1949f60197796b92dd61a9192993575c83f759f2030ee0558635eaa"
Nov 22 10:40:56 crc kubenswrapper[4926]: I1122 10:40:56.680238 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 10:40:56 crc kubenswrapper[4926]: I1122 10:40:56.680291 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 10:40:56 crc kubenswrapper[4926]: I1122 10:40:56.680309 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 10:40:56 crc kubenswrapper[4926]: I1122 10:40:56.680330 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 10:40:56 crc kubenswrapper[4926]: I1122 10:40:56.680346 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:56Z","lastTransitionTime":"2025-11-22T10:40:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 10:40:56 crc kubenswrapper[4926]: I1122 10:40:56.783679 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 10:40:56 crc kubenswrapper[4926]: I1122 10:40:56.783729 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 10:40:56 crc kubenswrapper[4926]: I1122 10:40:56.783745 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 10:40:56 crc kubenswrapper[4926]: I1122 10:40:56.783767 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 10:40:56 crc kubenswrapper[4926]: I1122 10:40:56.783782 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:56Z","lastTransitionTime":"2025-11-22T10:40:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 10:40:56 crc kubenswrapper[4926]: I1122 10:40:56.887220 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 10:40:56 crc kubenswrapper[4926]: I1122 10:40:56.887281 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 10:40:56 crc kubenswrapper[4926]: I1122 10:40:56.887297 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 10:40:56 crc kubenswrapper[4926]: I1122 10:40:56.887319 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 10:40:56 crc kubenswrapper[4926]: I1122 10:40:56.887336 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:56Z","lastTransitionTime":"2025-11-22T10:40:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 10:40:56 crc kubenswrapper[4926]: I1122 10:40:56.990793 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 10:40:56 crc kubenswrapper[4926]: I1122 10:40:56.990834 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 10:40:56 crc kubenswrapper[4926]: I1122 10:40:56.990847 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 10:40:56 crc kubenswrapper[4926]: I1122 10:40:56.990864 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 10:40:56 crc kubenswrapper[4926]: I1122 10:40:56.990876 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:56Z","lastTransitionTime":"2025-11-22T10:40:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 10:40:57 crc kubenswrapper[4926]: I1122 10:40:57.072731 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-z69nr_25bc94bb-a5d1-431c-9847-2f6a02997e25/ovnkube-controller/2.log"
Nov 22 10:40:57 crc kubenswrapper[4926]: I1122 10:40:57.075642 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-z69nr" event={"ID":"25bc94bb-a5d1-431c-9847-2f6a02997e25","Type":"ContainerStarted","Data":"b2e1039227d0efdfdc18a3ec2d9bac78d747c80c3ae8c0a78b3cef27bae4245f"}
Nov 22 10:40:57 crc kubenswrapper[4926]: I1122 10:40:57.076138 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-z69nr"
Nov 22 10:40:57 crc kubenswrapper[4926]: I1122 10:40:57.093332 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 10:40:57 crc kubenswrapper[4926]: I1122 10:40:57.100200 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-z69nr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"25bc94bb-a5d1-431c-9847-2f6a02997e25\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://20393151c13ddc03e23c32092c70e09c99527d0dd7554c02fc9d2ef8f6e38ba4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://877601ea3a274df873ed3165ae9f9e407093527a55ca10bb9a1c6092312c1b58\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ecaafcef7f92f85be04341712725da108f6dc00c79d4fb72f683186ccaa7c7d7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f2e1c170bdf63c83df5049fbe46ef4de6e08838e961ed3c16904cb9572fdf52f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4dd9889bb8600e958757f70ba92e2e7742f9f4e8ce9bf9c11dc4a2fd4ba0aaa2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f647a7091a4d0acbf116e2583851b13ec4415004447202e892f2b24b7acf5e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b2e1039227d0efdfdc18a3ec2d9bac78d747c80c
3ae8c0a78b3cef27bae4245f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f289e2c0a1949f60197796b92dd61a9192993575c83f759f2030ee0558635eaa\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-22T10:40:29Z\\\",\\\"message\\\":\\\"8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1122 10:40:29.517517 6569 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1122 10:40:29.517528 6569 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1122 10:40:29.517565 6569 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1122 10:40:29.517564 6569 handler.go:208] Removed *v1.Node event handler 2\\\\nI1122 10:40:29.517583 6569 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1122 10:40:29.517597 6569 handler.go:208] Removed *v1.Node event handler 7\\\\nI1122 10:40:29.517615 6569 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1122 10:40:29.517621 6569 handler.go:190] Sending *v1.Pod event handler 3 for removal\\\\nI1122 10:40:29.517625 6569 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI1122 10:40:29.517631 6569 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1122 10:40:29.517641 6569 handler.go:208] Removed *v1.Pod event handler 6\\\\nI1122 10:40:29.517653 6569 handler.go:208] Removed *v1.Pod event handler 3\\\\nI1122 10:40:29.517930 6569 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI1122 10:40:29.517976 6569 factory.go:656] Stopping watch factory\\\\nI1122 10:40:29.517989 6569 ovnkube.go:599] Stopped ovnkube\\\\nI1122 
1\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:28Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://19bceeb84e2cce918b75507fb0f9d5e4365fcd0d30da4491b83ed54b3c2d369a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"cont
ainerID\\\":\\\"cri-o://fde011e559f073f261a07a3e55c05d88d1b11a16b6858b74cb8ef79b97c266b1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fde011e559f073f261a07a3e55c05d88d1b11a16b6858b74cb8ef79b97c266b1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:00Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-z69nr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:57Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:57 crc kubenswrapper[4926]: I1122 10:40:57.127116 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:57 crc kubenswrapper[4926]: I1122 10:40:57.127167 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:57 crc kubenswrapper[4926]: I1122 10:40:57.127187 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:57 crc kubenswrapper[4926]: I1122 10:40:57.127201 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:57Z","lastTransitionTime":"2025-11-22T10:40:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:40:57 crc kubenswrapper[4926]: I1122 10:40:57.146218 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-cqsd2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"40323bfa-0937-4581-a5f5-bbfe1de066eb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ed82455ff9e2721a5688857be775b66c293c17713ae1ce362da8eca9a0c52c24\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w5t6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2335c0518813a704a36b683366f4b63c953afc0187416234b6682532298f80ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w5t6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:12Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-cqsd2\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:57Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:57 crc kubenswrapper[4926]: I1122 10:40:57.183696 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1f3bdd4c-e5c5-46f1-8f92-450d892ef2e2\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b68f6c08d1ac65e51054d798dcb1e6affbf5f114ea3848b544d3093168e44150\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://adceb29cfdba4f70773eb38f81b2291080a8a25e6d6ffe7fbbb037118e0ac29f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://29acb771444281287208f7c5aecfe78ed6e70fe2940be31d423c011878bee6c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"la
stState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c37c77ac89f0469ff68b3d5dffc2af7d7b6997b8975abe244e302baf0f5bbacc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ed65ad0463b5aa72d13756891af20691bff705a94c67a55d45f71af2a88b01e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2f4c96d7281eda14ad3971ee27f9f0f401d12ba8b72e32b65bd9d4935182f980\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2f4c96d7281eda14ad3971ee27f9f0f401d12ba8b72e32b65bd9d4935182f980\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:39:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:39:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ebad6d17a869f5f58a7c43300ceace96025b58ca9621cb6bc23b45d69fe471b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",
\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ebad6d17a869f5f58a7c43300ceace96025b58ca9621cb6bc23b45d69fe471b4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:39:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:39:42Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://6075bb148352742b787b1e8df58fe76d06db89303a7c297d07446148532e92cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6075bb148352742b787b1e8df58fe76d06db89303a7c297d07446148532e92cb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:39:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:39:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:39:40Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:57Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:57 crc kubenswrapper[4926]: I1122 10:40:57.197591 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://20ee112074738980326d231b1680aa74c920ec797dbfe4752ebfdfb99695caeb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0a5e6c7d3f4dd4bbfdc58c50672c5187d32b136fb9e096c95e786193e1c773b9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:57Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:57 crc kubenswrapper[4926]: I1122 10:40:57.209237 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://136423f9eeb872be0b685dcfee2a81e5f4133ad09607dc53fdfa64d70e87ceab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:57Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:57 crc kubenswrapper[4926]: I1122 10:40:57.220524 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-c6w2q" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"36de2843-6491-4c54-b624-c4a3d328c164\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://954a6a1daad88733da68073d0f235894eb5931360545e07bc25ca2cbedf0efae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://13dff3bd18b5adc45fb92f8207a6726011752f430271dabbae9f2e94011f1b08\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-22T10:40:48Z\\\",\\\"message\\\":\\\"2025-11-22T10:40:02+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_9f86c420-ab6e-4737-b325-6c2a7026407f\\\\n2025-11-22T10:40:02+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_9f86c420-ab6e-4737-b325-6c2a7026407f to /host/opt/cni/bin/\\\\n2025-11-22T10:40:03Z [verbose] multus-daemon started\\\\n2025-11-22T10:40:03Z [verbose] Readiness Indicator file check\\\\n2025-11-22T10:40:48Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-52vzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:00Z\\\"}}\" for pod \"openshift-multus\"/\"multus-c6w2q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:57Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:57 crc kubenswrapper[4926]: I1122 10:40:57.229671 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:57 crc kubenswrapper[4926]: I1122 10:40:57.229707 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:57 crc kubenswrapper[4926]: I1122 10:40:57.229715 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:57 crc kubenswrapper[4926]: I1122 10:40:57.229729 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:57 crc kubenswrapper[4926]: I1122 10:40:57.229739 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:57Z","lastTransitionTime":"2025-11-22T10:40:57Z","reason":"KubeletNotReady","message":"container 
runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:57 crc kubenswrapper[4926]: I1122 10:40:57.231900 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:57Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:57 crc kubenswrapper[4926]: I1122 10:40:57.244468 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:57Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:57 crc kubenswrapper[4926]: I1122 10:40:57.257039 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-4sppr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f66b576d-83a6-4919-a918-7b075f35881e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a4a164ad2484a9c0b09c8e6b174448dedfe84a017ada69b19429d90621540080\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jwtrf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\
\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:01Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-4sppr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:57Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:57 crc kubenswrapper[4926]: I1122 10:40:57.269649 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"19e2ae79-41cf-4653-aa92-cc410145852b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5b4679473c2c11fecbd8496e1cbeb15b3a2e3fff5e5b5f127e0d1f1d7b3e5b70\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8066b411b165bb6f3aa9fac7c4f7d5687e58fba56ff2afd14c7a351ba3877483\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://96a716433f83f8041fc5f29383937a1b49d73701286739632a5c07a7e4a42077\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506c
e0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ff433d4efb9f77a47f79a39aa609b09a40a642f554e19b8fce0c84ad96a5e34\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:39:40Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:57Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:57 crc kubenswrapper[4926]: I1122 10:40:57.279193 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-wqf9b" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"385427b5-fb45-42dd-8ff8-a4c7cdad6157\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://06207f52a9095cf1830a6ad91952c7efa774aa15a2fca873dc09b6b995e26401\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4rrk8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:00Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-wqf9b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:57Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:57 crc kubenswrapper[4926]: I1122 10:40:57.294156 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2bff8ee1644ff7cd9bee279e5c641b5d9c55e0546fbc7d1f2330e4e86aee0c37\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:57Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:57 crc kubenswrapper[4926]: I1122 10:40:57.305017 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d4977b14-85c3-4141-9b15-1768f09e8d27\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7f4461b3d09c647d71476b2c089ab47e93fe0e0efb27f8179e476ff3667447f6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5nwn7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fb54559825bae40317f4dbc23c5b4238c4681f9845053656d59b2d2b3e504e1a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5nwn7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:00Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-xr9nd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:57Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:57 crc kubenswrapper[4926]: I1122 10:40:57.320083 4926 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-additional-cni-plugins-sr572" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fa95257d-7464-4038-b2f3-aa795e4ac425\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b2dd6b2a2fe640f28f973df40cf7a31e3cba962a0d2aec796c6fcfa3892adce1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0a7d49a66e96b6f535cae7cd657acf64164c67b52df055b133183ef25644863d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0a7d49a66e96b6f535cae7cd657acf64164c67b52df055b133183ef25644863d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://53833aaa04a836cea1e6539fd089b91d92bec1e9c1037d036a09fa83a95b45da\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2c
c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://53833aaa04a836cea1e6539fd089b91d92bec1e9c1037d036a09fa83a95b45da\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://80eeb7fb332eda9b9472092de548160297c4abdf76a929904fcf3321e992c230\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://80eeb7fb332eda9b9472092de548160297c4abdf76a929904fcf3321e992c230\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:40:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6293e4f193241ddbe2b4e93ceee670330e9e6cf86889fc9b049a7195da496a28\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6293e4f193241ddbe2b4e93ceee670330e9e6cf86889fc9b049a7195da496a28\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:40:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-re
lease\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6ffd06bbdeee7c23dde48a03a975b5a86b258d8f95281abd78c8ce459155bb3f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6ffd06bbdeee7c23dde48a03a975b5a86b258d8f95281abd78c8ce459155bb3f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:40:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ff5dbca8ebeaee6449c5eadf548e32151cc59875e405a574c1f2e0677b40bb19\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ff5dbca8ebeaee6449c5eadf548e32151cc59875e405a574c1f2e0677b40bb19\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:40:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:00Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-sr572\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:57Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:57 crc kubenswrapper[4926]: I1122 10:40:57.330969 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-jfbf4" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c42b6f47-b1a4-4fee-8681-3b5288370323\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:14Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:14Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:14Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cddxh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cddxh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:14Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-jfbf4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:57Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:57 crc kubenswrapper[4926]: I1122 10:40:57.332182 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:57 crc kubenswrapper[4926]: I1122 10:40:57.332203 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:57 crc kubenswrapper[4926]: I1122 10:40:57.332213 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientPID" Nov 22 10:40:57 crc kubenswrapper[4926]: I1122 10:40:57.332228 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:57 crc kubenswrapper[4926]: I1122 10:40:57.332238 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:57Z","lastTransitionTime":"2025-11-22T10:40:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:57 crc kubenswrapper[4926]: I1122 10:40:57.345956 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8bc26a84-5939-4708-97f7-36fb5b1d1f27\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bdc617bc3d316106b3e66df8521ae9165a90df081b92c4a6c167895006a667a4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8bdb9d01c359b36d5ea1517197ab1db0746267b371b5aaf6ae9af7157af6f76\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f92968d6a164ebcee515ead5d8704ab04430b65f29cec21c10cecf2b99079737\\\",\\\"image\\\":\\\
"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://99cddd2a619c5ea76699afd981ea6b0ccab8df382e64f7f9b5399ac9091da32a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e4845e8b99f2def354e4a055d407495a578db4994aef6fb1c70b91760bbf4e2a\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"message\\\":\\\"le observer\\\\nW1122 10:40:01.221686 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1122 10:40:01.221835 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1122 10:40:01.223195 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3742639324/tls.crt::/tmp/serving-cert-3742639324/tls.key\\\\\\\"\\\\nI1122 10:40:01.845350 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1122 10:40:01.848213 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1122 10:40:01.848231 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1122 10:40:01.848251 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1122 10:40:01.848258 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1122 10:40:01.855383 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1122 10:40:01.855412 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1122 10:40:01.855418 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1122 10:40:01.855423 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nI1122 10:40:01.855441 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1122 10:40:01.855456 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1122 10:40:01.855461 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1122 10:40:01.855464 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1122 10:40:01.857125 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T10:39:55Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://59001d5969924d417da69f7b385e8ee904bb5289914c6f4a58156a80f2553abd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:43Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e4fdad471fe7d7332fa8c1fc1e4779b934b140f9c808e0af44f05808c56b3e42\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e4fdad471fe7d7332fa8c1fc1e4779b934b140f9c808e0af44f05808c56b3e42\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:39:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:39:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:39:40Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:57Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:57 crc kubenswrapper[4926]: I1122 10:40:57.358513 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"000da1af-72cf-48f8-a5f6-ba658a30f2f2\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cb848b9bd67cb4ffab830dcdca81d4ddcc4fdbdda65202dbfd4632f8fc0bdfc5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a34606ee4262c2a39b7e799d969d8a76024895265fba033ffdbbd036c9e80fe3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0b5507d01d4acb2f96c9c8b4978d67033fe6e9f01a035089649879219c4e9902\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6ec673ef3eadee493ad9112c2b920087ff03986cb55933071e6be31f987ff8b1\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6ec673ef3eadee493ad9112c2b920087ff03986cb55933071e6be31f987ff8b1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:39:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:39:41Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:39:40Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:57Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:57 crc kubenswrapper[4926]: I1122 10:40:57.372233 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:57Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:57 crc kubenswrapper[4926]: I1122 10:40:57.434291 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:57 crc kubenswrapper[4926]: I1122 10:40:57.434327 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:57 crc kubenswrapper[4926]: I1122 10:40:57.434338 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:57 crc kubenswrapper[4926]: I1122 10:40:57.434354 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:57 crc kubenswrapper[4926]: I1122 10:40:57.434365 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:57Z","lastTransitionTime":"2025-11-22T10:40:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:57 crc kubenswrapper[4926]: I1122 10:40:57.537154 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:57 crc kubenswrapper[4926]: I1122 10:40:57.537206 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:57 crc kubenswrapper[4926]: I1122 10:40:57.537218 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:57 crc kubenswrapper[4926]: I1122 10:40:57.537232 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:57 crc kubenswrapper[4926]: I1122 10:40:57.537242 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:57Z","lastTransitionTime":"2025-11-22T10:40:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:57 crc kubenswrapper[4926]: I1122 10:40:57.580861 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 22 10:40:57 crc kubenswrapper[4926]: E1122 10:40:57.581014 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 22 10:40:57 crc kubenswrapper[4926]: I1122 10:40:57.640668 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:57 crc kubenswrapper[4926]: I1122 10:40:57.640744 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:57 crc kubenswrapper[4926]: I1122 10:40:57.640770 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:57 crc kubenswrapper[4926]: I1122 10:40:57.640801 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:57 crc kubenswrapper[4926]: I1122 10:40:57.640825 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:57Z","lastTransitionTime":"2025-11-22T10:40:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:57 crc kubenswrapper[4926]: I1122 10:40:57.744278 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:57 crc kubenswrapper[4926]: I1122 10:40:57.744333 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:57 crc kubenswrapper[4926]: I1122 10:40:57.744351 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:57 crc kubenswrapper[4926]: I1122 10:40:57.744373 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:57 crc kubenswrapper[4926]: I1122 10:40:57.744389 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:57Z","lastTransitionTime":"2025-11-22T10:40:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:40:57 crc kubenswrapper[4926]: I1122 10:40:57.848387 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:57 crc kubenswrapper[4926]: I1122 10:40:57.848511 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:57 crc kubenswrapper[4926]: I1122 10:40:57.848536 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:57 crc kubenswrapper[4926]: I1122 10:40:57.848566 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:57 crc kubenswrapper[4926]: I1122 10:40:57.848588 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:57Z","lastTransitionTime":"2025-11-22T10:40:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:57 crc kubenswrapper[4926]: I1122 10:40:57.951876 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:57 crc kubenswrapper[4926]: I1122 10:40:57.951982 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:57 crc kubenswrapper[4926]: I1122 10:40:57.952005 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:57 crc kubenswrapper[4926]: I1122 10:40:57.952034 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:57 crc kubenswrapper[4926]: I1122 10:40:57.952051 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:57Z","lastTransitionTime":"2025-11-22T10:40:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:58 crc kubenswrapper[4926]: I1122 10:40:58.055723 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:58 crc kubenswrapper[4926]: I1122 10:40:58.055815 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:58 crc kubenswrapper[4926]: I1122 10:40:58.055840 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:58 crc kubenswrapper[4926]: I1122 10:40:58.055873 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:58 crc kubenswrapper[4926]: I1122 10:40:58.055946 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:58Z","lastTransitionTime":"2025-11-22T10:40:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:40:58 crc kubenswrapper[4926]: I1122 10:40:58.083118 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-z69nr_25bc94bb-a5d1-431c-9847-2f6a02997e25/ovnkube-controller/3.log" Nov 22 10:40:58 crc kubenswrapper[4926]: I1122 10:40:58.084076 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-z69nr_25bc94bb-a5d1-431c-9847-2f6a02997e25/ovnkube-controller/2.log" Nov 22 10:40:58 crc kubenswrapper[4926]: I1122 10:40:58.087965 4926 generic.go:334] "Generic (PLEG): container finished" podID="25bc94bb-a5d1-431c-9847-2f6a02997e25" containerID="b2e1039227d0efdfdc18a3ec2d9bac78d747c80c3ae8c0a78b3cef27bae4245f" exitCode=1 Nov 22 10:40:58 crc kubenswrapper[4926]: I1122 10:40:58.088027 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-z69nr" event={"ID":"25bc94bb-a5d1-431c-9847-2f6a02997e25","Type":"ContainerDied","Data":"b2e1039227d0efdfdc18a3ec2d9bac78d747c80c3ae8c0a78b3cef27bae4245f"} Nov 22 10:40:58 crc kubenswrapper[4926]: I1122 10:40:58.088113 4926 scope.go:117] "RemoveContainer" containerID="f289e2c0a1949f60197796b92dd61a9192993575c83f759f2030ee0558635eaa" Nov 22 10:40:58 crc kubenswrapper[4926]: I1122 10:40:58.088829 4926 scope.go:117] "RemoveContainer" containerID="b2e1039227d0efdfdc18a3ec2d9bac78d747c80c3ae8c0a78b3cef27bae4245f" Nov 22 10:40:58 crc kubenswrapper[4926]: E1122 10:40:58.089119 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-z69nr_openshift-ovn-kubernetes(25bc94bb-a5d1-431c-9847-2f6a02997e25)\"" pod="openshift-ovn-kubernetes/ovnkube-node-z69nr" podUID="25bc94bb-a5d1-431c-9847-2f6a02997e25" Nov 22 10:40:58 crc kubenswrapper[4926]: I1122 10:40:58.118435 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://136423f9eeb872be0b685dcfee2a81e5f4133ad09607dc53fdfa64d70e87ceab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:58Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:58 crc kubenswrapper[4926]: I1122 10:40:58.141577 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-c6w2q" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"36de2843-6491-4c54-b624-c4a3d328c164\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://954a6a1daad88733da68073d0f235894eb5931360545e07bc25ca2cbedf0efae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://13dff3bd18b5adc45fb92f8207a6726011752f430271dabbae9f2e94011f1b08\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-22T10:40:48Z\\\",\\\"message\\\":\\\"2025-11-22T10:40:02+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_9f86c420-ab6e-4737-b325-6c2a7026407f\\\\n2025-11-22T10:40:02+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_9f86c420-ab6e-4737-b325-6c2a7026407f to /host/opt/cni/bin/\\\\n2025-11-22T10:40:03Z [verbose] multus-daemon started\\\\n2025-11-22T10:40:03Z [verbose] Readiness Indicator file check\\\\n2025-11-22T10:40:48Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-52vzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:00Z\\\"}}\" for pod \"openshift-multus\"/\"multus-c6w2q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:58Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:58 crc kubenswrapper[4926]: I1122 10:40:58.158818 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-4sppr" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f66b576d-83a6-4919-a918-7b075f35881e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a4a164ad2484a9c0b09c8e6b174448dedfe84a017ada69b19429d90621540080\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jwtrf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:01Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-4sppr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:58Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:58 crc kubenswrapper[4926]: I1122 10:40:58.159358 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:58 crc kubenswrapper[4926]: I1122 10:40:58.159412 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:58 crc kubenswrapper[4926]: I1122 10:40:58.159431 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:58 crc kubenswrapper[4926]: I1122 10:40:58.159455 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:58 crc kubenswrapper[4926]: I1122 10:40:58.159472 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:58Z","lastTransitionTime":"2025-11-22T10:40:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:58 crc kubenswrapper[4926]: I1122 10:40:58.176051 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"19e2ae79-41cf-4653-aa92-cc410145852b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5b4679473c2c11fecbd8496e1cbeb15b3a2e3fff5e5b5f127e0d1f1d7b3e5b70\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8066b411b165bb6f3aa9fac7c4f7d5687e58fba56ff2afd14c7a351ba3877483\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://96a716433f83f8041fc5f29383937a1b49d73701286739632a5c07a7e4a42077\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:42
Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ff433d4efb9f77a47f79a39aa609b09a40a642f554e19b8fce0c84ad96a5e34\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:39:40Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:58Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:58 crc kubenswrapper[4926]: I1122 10:40:58.191659 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-wqf9b" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"385427b5-fb45-42dd-8ff8-a4c7cdad6157\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://06207f52a9095cf1830a6ad91952c7efa774aa15a2fca873dc09b6b995e26401\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4rrk8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:00Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-wqf9b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:58Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:58 crc kubenswrapper[4926]: I1122 10:40:58.207468 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2bff8ee1644ff7cd9bee279e5c641b5d9c55e0546fbc7d1f2330e4e86aee0c37\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:58Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:58 crc kubenswrapper[4926]: I1122 10:40:58.221391 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was 
deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:58Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:58 crc kubenswrapper[4926]: I1122 10:40:58.239482 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:58Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:58 crc kubenswrapper[4926]: I1122 10:40:58.253415 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-jfbf4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c42b6f47-b1a4-4fee-8681-3b5288370323\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:14Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:14Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:14Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cddxh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cddxh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:14Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-jfbf4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:58Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:58 crc kubenswrapper[4926]: I1122 10:40:58.262956 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:58 crc kubenswrapper[4926]: I1122 10:40:58.263007 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:58 crc kubenswrapper[4926]: I1122 10:40:58.263020 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:58 crc kubenswrapper[4926]: I1122 10:40:58.263039 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:58 crc kubenswrapper[4926]: I1122 10:40:58.263053 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:58Z","lastTransitionTime":"2025-11-22T10:40:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:40:58 crc kubenswrapper[4926]: I1122 10:40:58.274418 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8bc26a84-5939-4708-97f7-36fb5b1d1f27\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bdc617bc3d316106b3e66df8521ae9165a90df081b92c4a6c167895006a667a4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8bdb9d01c359b36d5ea1517197ab1db0746267b371b5aaf6ae9af7157af6f76\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f92968d6a164ebcee515ead5d8704ab04430b65f29cec21c10cecf2b99079737\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/ku
bernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://99cddd2a619c5ea76699afd981ea6b0ccab8df382e64f7f9b5399ac9091da32a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e4845e8b99f2def354e4a055d407495a578db4994aef6fb1c70b91760bbf4e2a\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"message\\\":\\\"le observer\\\\nW1122 10:40:01.221686 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1122 10:40:01.221835 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1122 10:40:01.223195 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3742639324/tls.crt::/tmp/serving-cert-3742639324/tls.key\\\\\\\"\\\\nI1122 10:40:01.845350 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1122 10:40:01.848213 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1122 10:40:01.848231 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1122 10:40:01.848251 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1122 10:40:01.848258 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1122 10:40:01.855383 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1122 10:40:01.855412 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1122 10:40:01.855418 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1122 10:40:01.855423 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nI1122 10:40:01.855441 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1122 10:40:01.855456 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1122 10:40:01.855461 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1122 10:40:01.855464 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1122 10:40:01.857125 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T10:39:55Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://59001d5969924d417da69f7b385e8ee904bb5289914c6f4a58156a80f2553abd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:43Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e4fdad471fe7d7332fa8c1fc1e4779b934b140f9c808e0af44f05808c56b3e42\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e4fdad471fe7d7332fa8c1fc1e4779b934b140f9c808e0af44f05808c56b3e42\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:39:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:39:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:39:40Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:58Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:58 crc kubenswrapper[4926]: I1122 10:40:58.287757 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"000da1af-72cf-48f8-a5f6-ba658a30f2f2\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cb848b9bd67cb4ffab830dcdca81d4ddcc4fdbdda65202dbfd4632f8fc0bdfc5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a34606ee4262c2a39b7e799d969d8a76024895265fba033ffdbbd036c9e80fe3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0b5507d01d4acb2f96c9c8b4978d67033fe6e9f01a035089649879219c4e9902\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6ec673ef3eadee493ad9112c2b920087ff03986cb55933071e6be31f987ff8b1\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6ec673ef3eadee493ad9112c2b920087ff03986cb55933071e6be31f987ff8b1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:39:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:39:41Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:39:40Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:58Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:58 crc kubenswrapper[4926]: I1122 10:40:58.304073 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:58Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:58 crc kubenswrapper[4926]: I1122 10:40:58.316674 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d4977b14-85c3-4141-9b15-1768f09e8d27\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7f4461b3d09c647d71476b2c089ab47e93fe0e0efb27f8179e476ff3667447f6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5nwn7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fb54559825bae40317f4dbc23c5b4238c4681f9845053656d59b2d2b3e504e1a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae3
4a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5nwn7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:00Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-xr9nd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:58Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:58 crc kubenswrapper[4926]: I1122 10:40:58.337140 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-sr572" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fa95257d-7464-4038-b2f3-aa795e4ac425\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b2dd6b2a2fe640f28f973df40cf7a31e3cba962a0d2aec796c6fcfa3892adce1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0a7d49a66e96b6f535cae7cd657acf64164c67b52df055b133183ef25644863d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\"
:\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0a7d49a66e96b6f535cae7cd657acf64164c67b52df055b133183ef25644863d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://53833aaa04a836cea1e6539fd089b91d92bec1e9c1037d036a09fa83a95b45da\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://53833aaa04a836cea1e6539fd089b91d92bec1e9c1037d036a09fa83a95b45da\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://80eeb7fb332eda9b9472092de548160297c4abdf76a929904fcf3321e992c230\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://80eeb7fb332eda9b9472092de548160297c4abdf76a929904fcf3321e992c230\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:40:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\
"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6293e4f193241ddbe2b4e93ceee670330e9e6cf86889fc9b049a7195da496a28\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6293e4f193241ddbe2b4e93ceee670330e9e6cf86889fc9b049a7195da496a28\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:40:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6ffd06bbdeee7c23dde48a03a975b5a86b258d8f95281abd78c8ce459155bb3f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6ffd06bbdeee7c23dde48a03a975b5a86b258d8f95281abd78c8ce459155bb3f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:40:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ff5dbca8ebeaee6449c5eadf548e32151cc59875e405a574c1f2e0677b40bb19\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ff5dbca8ebeaee6449c5eadf548e32151cc59875e405a574c1f2e0677b40bb19\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:40:07Z
\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:00Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-sr572\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:58Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:58 crc kubenswrapper[4926]: I1122 10:40:58.357826 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1f3bdd4c-e5c5-46f1-8f92-450d892ef2e2\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b68f6c08d1ac65e51054d798dcb1e6affbf5f114ea3848b544d3093168e44150\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://adceb29cfdba4f70773eb38f81b2291080a8a25e6d6ffe7fbbb037118e0ac29f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"
running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://29acb771444281287208f7c5aecfe78ed6e70fe2940be31d423c011878bee6c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c37c77ac89f0469ff68b3d5dffc2af7d7b6997b8975abe244e302baf0f5bbacc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ed65ad0463b5aa72d13756891af20691bff705a94c67a55d45f71af2a88b01e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2f4c96d7281eda14ad3971ee27f9f0f401d12ba8b72e32b65bd9d4935182f980\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2f4c96d7281eda14ad397
1ee27f9f0f401d12ba8b72e32b65bd9d4935182f980\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:39:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:39:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ebad6d17a869f5f58a7c43300ceace96025b58ca9621cb6bc23b45d69fe471b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ebad6d17a869f5f58a7c43300ceace96025b58ca9621cb6bc23b45d69fe471b4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:39:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:39:42Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://6075bb148352742b787b1e8df58fe76d06db89303a7c297d07446148532e92cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6075bb148352742b787b1e8df58fe76d06db89303a7c297d07446148532e92cb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:39:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:39:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:39:40Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:58Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:58 crc kubenswrapper[4926]: I1122 10:40:58.365316 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:58 crc kubenswrapper[4926]: I1122 10:40:58.365522 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:58 crc kubenswrapper[4926]: I1122 10:40:58.365722 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:58 crc kubenswrapper[4926]: I1122 10:40:58.365864 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:58 crc kubenswrapper[4926]: I1122 10:40:58.366013 4926 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:58Z","lastTransitionTime":"2025-11-22T10:40:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:58 crc kubenswrapper[4926]: I1122 10:40:58.379109 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://20ee112074738980326d231b1680aa74c920ec797dbfe4752ebfdfb99695caeb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0a5e6c7d3f4dd4bbfdc58c50672c5187d32b136fb9e096c95e786193e1c773b9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify 
certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:58Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:58 crc kubenswrapper[4926]: I1122 10:40:58.405298 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-z69nr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"25bc94bb-a5d1-431c-9847-2f6a02997e25\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://20393151c13ddc03e23c32092c70e09c99527d0dd7554c02fc9d2ef8f6e38ba4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://877601ea3a274df873ed3165ae9f9e407093527a55ca10bb9a1c6092312c1b58\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w
\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ecaafcef7f92f85be04341712725da108f6dc00c79d4fb72f683186ccaa7c7d7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f2e1c170bdf63c83df5049fbe46ef4de6e08838e961ed3c16904cb9572fdf52f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4dd9889bb8600e958757f70ba92e2e7742f9f4e8ce9bf9c11dc4a2fd4ba0aaa2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f647a7091a4d0acbf116e2583851b13ec4415004447202e892f2b24b7acf5e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174
f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b2e1039227d0efdfdc18a3ec2d9bac78d747c80c3ae8c0a78b3cef27bae4245f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f289e2c0a1949f60197796b92dd61a9192993575c83f759f2030ee0558635eaa\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-22T10:40:29Z\\\",\\\"message\\\":\\\"8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1122 10:40:29.517517 6569 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1122 10:40:29.517528 6569 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1122 10:40:29.517565 6569 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1122 10:40:29.517564 6569 handler.go:208] Removed *v1.Node event handler 2\\\\nI1122 10:40:29.517583 6569 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1122 10:40:29.517597 6569 handler.go:208] Removed *v1.Node event handler 7\\\\nI1122 10:40:29.517615 6569 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1122 10:40:29.517621 6569 handler.go:190] Sending *v1.Pod event handler 3 for removal\\\\nI1122 10:40:29.517625 6569 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI1122 10:40:29.517631 6569 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1122 10:40:29.517641 6569 handler.go:208] Removed *v1.Pod event handler 6\\\\nI1122 10:40:29.517653 6569 handler.go:208] Removed *v1.Pod event handler 3\\\\nI1122 10:40:29.517930 6569 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI1122 10:40:29.517976 6569 factory.go:656] Stopping watch factory\\\\nI1122 10:40:29.517989 6569 ovnkube.go:599] Stopped ovnkube\\\\nI1122 
1\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:28Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b2e1039227d0efdfdc18a3ec2d9bac78d747c80c3ae8c0a78b3cef27bae4245f\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-22T10:40:57Z\\\",\\\"message\\\":\\\"k-operator-58b4c7f79c-55gtf openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-cqsd2 openshift-ovn-kubernetes/ovnkube-node-z69nr openshift-dns/node-resolver-wqf9b openshift-kube-scheduler/openshift-kube-scheduler-crc openshift-multus/multus-additional-cni-plugins-sr572]\\\\nI1122 10:40:57.541881 6924 obj_retry.go:418] Waiting for all the *v1.Pod retry setup to complete in iterateRetryResources\\\\nI1122 10:40:57.541909 6924 obj_retry.go:303] Retry object setup: *v1.Pod openshift-multus/multus-additional-cni-plugins-sr572\\\\nI1122 10:40:57.541916 6924 obj_retry.go:365] Adding new object: *v1.Pod openshift-multus/multus-additional-cni-plugins-sr572\\\\nI1122 10:40:57.541923 6924 ovn.go:134] Ensuring zone local for Pod openshift-multus/multus-additional-cni-plugins-sr572 in node crc\\\\nI1122 10:40:57.541929 6924 obj_retry.go:386] Retry successful for *v1.Pod openshift-multus/multus-additional-cni-plugins-sr572 after 0 failed attempt(s)\\\\nI1122 10:40:57.541933 6924 default_network_controller.go:776] Recording success event on pod openshift-multus/multus-additional-cni-plugins-sr572\\\\nI1122 10:40:57.541944 6924 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1122 10:40:57.541989 6924 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to 
create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://19bceeb84e2cce918b75507fb0f9d5e4365fcd0d30da4491b83ed54b3c2d369a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fde011e559f073f261a07a3e55c05d88d1b11a16b6858b74cb8ef79b97c266b1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d
1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fde011e559f073f261a07a3e55c05d88d1b11a16b6858b74cb8ef79b97c266b1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:00Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-z69nr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:58Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:58 crc kubenswrapper[4926]: I1122 10:40:58.420966 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-cqsd2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"40323bfa-0937-4581-a5f5-bbfe1de066eb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ed82455ff9e2721a5688857be775b66c293c17713ae1ce362da8eca9a0c52c24\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w5t6w\\\",\\\"readOnly\\\":true,\\\"recursiveRea
dOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2335c0518813a704a36b683366f4b63c953afc0187416234b6682532298f80ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w5t6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:12Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-cqsd2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:58Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:58 crc kubenswrapper[4926]: I1122 10:40:58.469807 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:58 crc kubenswrapper[4926]: I1122 10:40:58.469875 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:58 crc kubenswrapper[4926]: I1122 10:40:58.469939 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:58 crc kubenswrapper[4926]: I1122 10:40:58.469970 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:58 crc kubenswrapper[4926]: I1122 10:40:58.469994 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:58Z","lastTransitionTime":"2025-11-22T10:40:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:40:58 crc kubenswrapper[4926]: I1122 10:40:58.573218 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:58 crc kubenswrapper[4926]: I1122 10:40:58.573531 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:58 crc kubenswrapper[4926]: I1122 10:40:58.573775 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:58 crc kubenswrapper[4926]: I1122 10:40:58.574177 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:58 crc kubenswrapper[4926]: I1122 10:40:58.574551 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:58Z","lastTransitionTime":"2025-11-22T10:40:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:58 crc kubenswrapper[4926]: I1122 10:40:58.581569 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 10:40:58 crc kubenswrapper[4926]: E1122 10:40:58.581984 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 22 10:40:58 crc kubenswrapper[4926]: I1122 10:40:58.581813 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 22 10:40:58 crc kubenswrapper[4926]: E1122 10:40:58.582415 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 22 10:40:58 crc kubenswrapper[4926]: I1122 10:40:58.582064 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-jfbf4" Nov 22 10:40:58 crc kubenswrapper[4926]: E1122 10:40:58.583240 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-jfbf4" podUID="c42b6f47-b1a4-4fee-8681-3b5288370323" Nov 22 10:40:58 crc kubenswrapper[4926]: I1122 10:40:58.677620 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:58 crc kubenswrapper[4926]: I1122 10:40:58.677999 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:58 crc kubenswrapper[4926]: I1122 10:40:58.678344 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:58 crc kubenswrapper[4926]: I1122 10:40:58.678565 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:58 crc kubenswrapper[4926]: I1122 10:40:58.678768 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:58Z","lastTransitionTime":"2025-11-22T10:40:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:58 crc kubenswrapper[4926]: I1122 10:40:58.782271 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:58 crc kubenswrapper[4926]: I1122 10:40:58.782342 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:58 crc kubenswrapper[4926]: I1122 10:40:58.782366 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:58 crc kubenswrapper[4926]: I1122 10:40:58.782397 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:58 crc kubenswrapper[4926]: I1122 10:40:58.782420 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:58Z","lastTransitionTime":"2025-11-22T10:40:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:40:58 crc kubenswrapper[4926]: I1122 10:40:58.885993 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:58 crc kubenswrapper[4926]: I1122 10:40:58.886056 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:58 crc kubenswrapper[4926]: I1122 10:40:58.886076 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:58 crc kubenswrapper[4926]: I1122 10:40:58.886109 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:58 crc kubenswrapper[4926]: I1122 10:40:58.886132 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:58Z","lastTransitionTime":"2025-11-22T10:40:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:58 crc kubenswrapper[4926]: I1122 10:40:58.989878 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:58 crc kubenswrapper[4926]: I1122 10:40:58.990049 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:58 crc kubenswrapper[4926]: I1122 10:40:58.990071 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:58 crc kubenswrapper[4926]: I1122 10:40:58.990094 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:58 crc kubenswrapper[4926]: I1122 10:40:58.990113 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:58Z","lastTransitionTime":"2025-11-22T10:40:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:59 crc kubenswrapper[4926]: I1122 10:40:59.092633 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:59 crc kubenswrapper[4926]: I1122 10:40:59.092726 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:59 crc kubenswrapper[4926]: I1122 10:40:59.092748 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:59 crc kubenswrapper[4926]: I1122 10:40:59.092817 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:59 crc kubenswrapper[4926]: I1122 10:40:59.092844 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:59Z","lastTransitionTime":"2025-11-22T10:40:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:40:59 crc kubenswrapper[4926]: I1122 10:40:59.095874 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-z69nr_25bc94bb-a5d1-431c-9847-2f6a02997e25/ovnkube-controller/3.log" Nov 22 10:40:59 crc kubenswrapper[4926]: I1122 10:40:59.101711 4926 scope.go:117] "RemoveContainer" containerID="b2e1039227d0efdfdc18a3ec2d9bac78d747c80c3ae8c0a78b3cef27bae4245f" Nov 22 10:40:59 crc kubenswrapper[4926]: E1122 10:40:59.102083 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-z69nr_openshift-ovn-kubernetes(25bc94bb-a5d1-431c-9847-2f6a02997e25)\"" pod="openshift-ovn-kubernetes/ovnkube-node-z69nr" podUID="25bc94bb-a5d1-431c-9847-2f6a02997e25" Nov 22 10:40:59 crc kubenswrapper[4926]: I1122 10:40:59.136123 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1f3bdd4c-e5c5-46f1-8f92-450d892ef2e2\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b68f6c08d1ac65e51054d798dcb1e6affbf5f114ea3848b544d3093168e44150\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://adceb29cfdba4f70773eb38f81b2291080a8a25e6d6ffe7fbbb037118e0ac29f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/e
tc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://29acb771444281287208f7c5aecfe78ed6e70fe2940be31d423c011878bee6c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c37c77ac89f0469ff68b3d5dffc2af7d7b6997b8975abe244e302baf0f5bbacc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ed65ad0463b5aa72d13756891af20691bff705a94c67a55d45f71af2a88b01e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2f4c96d7281eda14ad3971ee27f9f0f401d12ba8b72e32b65bd9d4935182f980\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2f4c96d7281eda14ad3971ee27f9f0f401d12ba8b72e32b65bd9d4935182f980\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:39:41
Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:39:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ebad6d17a869f5f58a7c43300ceace96025b58ca9621cb6bc23b45d69fe471b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ebad6d17a869f5f58a7c43300ceace96025b58ca9621cb6bc23b45d69fe471b4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:39:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:39:42Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://6075bb148352742b787b1e8df58fe76d06db89303a7c297d07446148532e92cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6075bb148352742b787b1e8df58fe76d06db89303a7c297d07446148532e92cb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:39:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:39:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:39:40Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:59Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:59 crc kubenswrapper[4926]: I1122 10:40:59.158145 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://20ee112074738980326d231b1680aa74c920ec797dbfe4752ebfdfb99695caeb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0a5e6c7d3f4dd4bbfdc58c50672c5187d32b136fb9e096c95e786193e1c773b9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:59Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:59 crc kubenswrapper[4926]: I1122 10:40:59.190827 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-z69nr" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"25bc94bb-a5d1-431c-9847-2f6a02997e25\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://20393151c13ddc03e23c32092c70e09c99527d0dd7554c02fc9d2ef8f6e38ba4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://877601ea3a274df873ed3165ae9f9e407093527a55ca10bb9a1c6092312c1b58\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ecaafcef7f92f85be04341712725da108f6dc00c79d4fb72f683186ccaa7c7d7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f2e1c170bdf63c83df5049fbe46ef4de6e08838e961ed3c16904cb9572fdf52f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4dd9889bb8600e958757f70ba92e2e7742f9f4e8ce9bf9c11dc4a2fd4ba0aaa2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f647a7091a4d0acbf116e2583851b13ec4415004447202e892f2b24b7acf5e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b2e1039227d0efdfdc18a3ec2d9bac78d747c80c3ae8c0a78b3cef27bae4245f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b2e1039227d0efdfdc18a3ec2d9bac78d747c80c3ae8c0a78b3cef27bae4245f\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-22T10:40:57Z\\\",\\\"message\\\":\\\"k-operator-58b4c7f79c-55gtf openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-cqsd2 openshift-ovn-kubernetes/ovnkube-node-z69nr openshift-dns/node-resolver-wqf9b openshift-kube-scheduler/openshift-kube-scheduler-crc openshift-multus/multus-additional-cni-plugins-sr572]\\\\nI1122 10:40:57.541881 6924 obj_retry.go:418] Waiting for all the *v1.Pod retry setup to complete in iterateRetryResources\\\\nI1122 10:40:57.541909 6924 obj_retry.go:303] Retry object setup: *v1.Pod openshift-multus/multus-additional-cni-plugins-sr572\\\\nI1122 10:40:57.541916 6924 obj_retry.go:365] Adding new object: *v1.Pod openshift-multus/multus-additional-cni-plugins-sr572\\\\nI1122 10:40:57.541923 6924 ovn.go:134] Ensuring zone local for Pod openshift-multus/multus-additional-cni-plugins-sr572 in node crc\\\\nI1122 10:40:57.541929 6924 obj_retry.go:386] Retry successful for *v1.Pod openshift-multus/multus-additional-cni-plugins-sr572 after 0 failed attempt(s)\\\\nI1122 10:40:57.541933 6924 default_network_controller.go:776] Recording success event on pod openshift-multus/multus-additional-cni-plugins-sr572\\\\nI1122 10:40:57.541944 6924 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1122 10:40:57.541989 6924 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:56Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 40s restarting failed container=ovnkube-controller 
pod=ovnkube-node-z69nr_openshift-ovn-kubernetes(25bc94bb-a5d1-431c-9847-2f6a02997e25)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://19bceeb84e2cce918b75507fb0f9d5e4365fcd0d30da4491b83ed54b3c2d369a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fde011e559f073f261a07a3e55c05d88d1b11a16b6858b74cb8ef79b97c266b1\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fde011e559f073f261a07a3e55c05d88d1b11a16b6858b74cb8ef79b97c266b1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:00Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-z69nr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:59Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:59 crc kubenswrapper[4926]: I1122 10:40:59.196055 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:59 crc kubenswrapper[4926]: I1122 10:40:59.196167 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:59 crc kubenswrapper[4926]: I1122 10:40:59.196186 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:59 crc kubenswrapper[4926]: I1122 10:40:59.196211 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:59 crc kubenswrapper[4926]: I1122 10:40:59.196230 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:59Z","lastTransitionTime":"2025-11-22T10:40:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:40:59 crc kubenswrapper[4926]: I1122 10:40:59.210632 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-cqsd2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"40323bfa-0937-4581-a5f5-bbfe1de066eb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ed82455ff9e2721a5688857be775b66c293c17713ae1ce362da8eca9a0c52c24\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w5t6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2335c0518813a704a36b683366f4b63c953afc0187416234b6682532298f80ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w5t6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:12Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-cqsd2\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:59Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:59 crc kubenswrapper[4926]: I1122 10:40:59.228213 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://136423f9eeb872be0b685dcfee2a81e5f4133ad09607dc53fdfa64d70e87ceab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:59Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:59 crc kubenswrapper[4926]: I1122 10:40:59.246752 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-c6w2q" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"36de2843-6491-4c54-b624-c4a3d328c164\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://954a6a1daad88733da68073d0f235894eb5931360545e07bc25ca2cbedf0efae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://13dff3bd18b5adc45fb92f8207a6726011752f430271dabbae9f2e94011f1b08\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-22T10:40:48Z\\\",\\\"message\\\":\\\"2025-11-22T10:40:02+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_9f86c420-ab6e-4737-b325-6c2a7026407f\\\\n2025-11-22T10:40:02+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_9f86c420-ab6e-4737-b325-6c2a7026407f to /host/opt/cni/bin/\\\\n2025-11-22T10:40:03Z [verbose] multus-daemon started\\\\n2025-11-22T10:40:03Z [verbose] Readiness Indicator file check\\\\n2025-11-22T10:40:48Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-52vzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:00Z\\\"}}\" for pod \"openshift-multus\"/\"multus-c6w2q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:59Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:59 crc kubenswrapper[4926]: I1122 10:40:59.274087 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"19e2ae79-41cf-4653-aa92-cc410145852b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5b4679473c2c11fecbd8496e1cbeb15b3a2e3fff5e5b5f127e0d1f1d7b3e5b70\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8066b411b165bb6f3aa9fac7c4f7d5687e58fba56ff2afd14c7a351ba3877483\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://96a716433f83f8041fc5f29383937a1b49d73701286739632a5c07a7e4a42077\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ff433d4efb9f77a47f79a39aa609b09a40a642f554e19b8fce0c84ad96a5e34\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:39:40Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:59Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:59 crc kubenswrapper[4926]: I1122 10:40:59.287847 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-wqf9b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"385427b5-fb45-42dd-8ff8-a4c7cdad6157\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://06207f52a9095cf1830a6ad91952c7efa774aa15a2fca873dc09b6b995e26401\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4rrk8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\"
:[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:00Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-wqf9b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:59Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:59 crc kubenswrapper[4926]: I1122 10:40:59.298983 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:59 crc kubenswrapper[4926]: I1122 10:40:59.299026 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:59 crc kubenswrapper[4926]: I1122 10:40:59.299039 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:59 crc kubenswrapper[4926]: I1122 10:40:59.299057 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:59 crc kubenswrapper[4926]: I1122 10:40:59.299072 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:59Z","lastTransitionTime":"2025-11-22T10:40:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:59 crc kubenswrapper[4926]: I1122 10:40:59.308956 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2bff8ee1644ff7cd9bee279e5c641b5d9c55e0546fbc7d1f2330e4e86aee0c37\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:59Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:59 crc kubenswrapper[4926]: I1122 10:40:59.327421 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was 
deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:59Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:59 crc kubenswrapper[4926]: I1122 10:40:59.344640 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:59Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:59 crc kubenswrapper[4926]: I1122 10:40:59.358382 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-4sppr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f66b576d-83a6-4919-a918-7b075f35881e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a4a164ad2484a9c0b09c8e6b174448dedfe84a017ada69b19429d90621540080\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jwtrf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:01Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-4sppr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify 
certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:59Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:59 crc kubenswrapper[4926]: I1122 10:40:59.376581 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8bc26a84-5939-4708-97f7-36fb5b1d1f27\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bdc617bc3d316106b3e66df8521ae9165a90df081b92c4a6c167895006a667a4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8bdb9d01c359b36d5ea1517197ab1db0746267b371b5aaf6ae9af7157af6f76\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f92968d6a164ebcee515ead5d8704ab04430b65f29cec21c10cecf2b99079737\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernete
s/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://99cddd2a619c5ea76699afd981ea6b0ccab8df382e64f7f9b5399ac9091da32a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e4845e8b99f2def354e4a055d407495a578db4994aef6fb1c70b91760bbf4e2a\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"message\\\":\\\"le observer\\\\nW1122 10:40:01.221686 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1122 10:40:01.221835 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1122 10:40:01.223195 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3742639324/tls.crt::/tmp/serving-cert-3742639324/tls.key\\\\\\\"\\\\nI1122 10:40:01.845350 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1122 10:40:01.848213 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1122 10:40:01.848231 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1122 10:40:01.848251 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1122 10:40:01.848258 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1122 10:40:01.855383 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1122 10:40:01.855412 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1122 10:40:01.855418 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1122 10:40:01.855423 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nI1122 10:40:01.855441 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1122 10:40:01.855456 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1122 10:40:01.855461 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1122 10:40:01.855464 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1122 10:40:01.857125 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T10:39:55Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://59001d5969924d417da69f7b385e8ee904bb5289914c6f4a58156a80f2553abd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:43Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e4fdad471fe7d7332fa8c1fc1e4779b934b140f9c808e0af44f05808c56b3e42\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e4fdad471fe7d7332fa8c1fc1e4779b934b140f9c808e0af44f05808c56b3e42\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:39:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:39:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:39:40Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:59Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:59 crc kubenswrapper[4926]: I1122 10:40:59.391140 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"000da1af-72cf-48f8-a5f6-ba658a30f2f2\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cb848b9bd67cb4ffab830dcdca81d4ddcc4fdbdda65202dbfd4632f8fc0bdfc5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a34606ee4262c2a39b7e799d969d8a76024895265fba033ffdbbd036c9e80fe3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0b5507d01d4acb2f96c9c8b4978d67033fe6e9f01a035089649879219c4e9902\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6ec673ef3eadee493ad9112c2b920087ff03986cb55933071e6be31f987ff8b1\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6ec673ef3eadee493ad9112c2b920087ff03986cb55933071e6be31f987ff8b1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:39:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:39:41Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:39:40Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:59Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:59 crc kubenswrapper[4926]: I1122 10:40:59.402327 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:59 crc kubenswrapper[4926]: I1122 10:40:59.402637 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:59 crc kubenswrapper[4926]: I1122 10:40:59.402772 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:59 crc kubenswrapper[4926]: I1122 10:40:59.402961 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:59 crc kubenswrapper[4926]: I1122 10:40:59.403104 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:59Z","lastTransitionTime":"2025-11-22T10:40:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:40:59 crc kubenswrapper[4926]: I1122 10:40:59.408617 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:59Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:59 crc kubenswrapper[4926]: I1122 10:40:59.423220 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d4977b14-85c3-4141-9b15-1768f09e8d27\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7f4461b3d09c647d71476b2c089ab47e93fe0e0efb27f8179e476ff3667447f6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5nwn7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fb54559825bae40317f4dbc23c5b4238c4681f9845053656d59b2d2b3e504e1a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5nwn7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:00Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-xr9nd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:59Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:59 crc kubenswrapper[4926]: I1122 10:40:59.440933 4926 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-additional-cni-plugins-sr572" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fa95257d-7464-4038-b2f3-aa795e4ac425\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b2dd6b2a2fe640f28f973df40cf7a31e3cba962a0d2aec796c6fcfa3892adce1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0a7d49a66e96b6f535cae7cd657acf64164c67b52df055b133183ef25644863d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0a7d49a66e96b6f535cae7cd657acf64164c67b52df055b133183ef25644863d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://53833aaa04a836cea1e6539fd089b91d92bec1e9c1037d036a09fa83a95b45da\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2c
c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://53833aaa04a836cea1e6539fd089b91d92bec1e9c1037d036a09fa83a95b45da\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://80eeb7fb332eda9b9472092de548160297c4abdf76a929904fcf3321e992c230\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://80eeb7fb332eda9b9472092de548160297c4abdf76a929904fcf3321e992c230\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:40:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6293e4f193241ddbe2b4e93ceee670330e9e6cf86889fc9b049a7195da496a28\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6293e4f193241ddbe2b4e93ceee670330e9e6cf86889fc9b049a7195da496a28\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:40:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-re
lease\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6ffd06bbdeee7c23dde48a03a975b5a86b258d8f95281abd78c8ce459155bb3f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6ffd06bbdeee7c23dde48a03a975b5a86b258d8f95281abd78c8ce459155bb3f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:40:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ff5dbca8ebeaee6449c5eadf548e32151cc59875e405a574c1f2e0677b40bb19\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ff5dbca8ebeaee6449c5eadf548e32151cc59875e405a574c1f2e0677b40bb19\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:40:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:00Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-sr572\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:59Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:59 crc kubenswrapper[4926]: I1122 10:40:59.452414 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-jfbf4" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c42b6f47-b1a4-4fee-8681-3b5288370323\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:14Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:14Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:14Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cddxh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cddxh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:14Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-jfbf4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:40:59Z is after 2025-08-24T17:21:41Z" Nov 22 10:40:59 crc kubenswrapper[4926]: I1122 10:40:59.506063 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:59 crc kubenswrapper[4926]: I1122 10:40:59.506136 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:59 crc kubenswrapper[4926]: I1122 10:40:59.506157 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientPID" Nov 22 10:40:59 crc kubenswrapper[4926]: I1122 10:40:59.506185 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:59 crc kubenswrapper[4926]: I1122 10:40:59.506206 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:59Z","lastTransitionTime":"2025-11-22T10:40:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:59 crc kubenswrapper[4926]: I1122 10:40:59.581272 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 22 10:40:59 crc kubenswrapper[4926]: E1122 10:40:59.581453 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 22 10:40:59 crc kubenswrapper[4926]: I1122 10:40:59.609956 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:59 crc kubenswrapper[4926]: I1122 10:40:59.610035 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:59 crc kubenswrapper[4926]: I1122 10:40:59.610057 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:59 crc kubenswrapper[4926]: I1122 10:40:59.610085 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:59 crc kubenswrapper[4926]: I1122 10:40:59.610107 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:59Z","lastTransitionTime":"2025-11-22T10:40:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:40:59 crc kubenswrapper[4926]: I1122 10:40:59.712121 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:59 crc kubenswrapper[4926]: I1122 10:40:59.712150 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:59 crc kubenswrapper[4926]: I1122 10:40:59.712157 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:59 crc kubenswrapper[4926]: I1122 10:40:59.712170 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:59 crc kubenswrapper[4926]: I1122 10:40:59.712178 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:59Z","lastTransitionTime":"2025-11-22T10:40:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:59 crc kubenswrapper[4926]: I1122 10:40:59.815099 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:59 crc kubenswrapper[4926]: I1122 10:40:59.815145 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:59 crc kubenswrapper[4926]: I1122 10:40:59.815154 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:59 crc kubenswrapper[4926]: I1122 10:40:59.815166 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:59 crc kubenswrapper[4926]: I1122 10:40:59.815174 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:59Z","lastTransitionTime":"2025-11-22T10:40:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:40:59 crc kubenswrapper[4926]: I1122 10:40:59.918461 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:40:59 crc kubenswrapper[4926]: I1122 10:40:59.918538 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:40:59 crc kubenswrapper[4926]: I1122 10:40:59.918551 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:40:59 crc kubenswrapper[4926]: I1122 10:40:59.918567 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:40:59 crc kubenswrapper[4926]: I1122 10:40:59.918577 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:40:59Z","lastTransitionTime":"2025-11-22T10:40:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:41:00 crc kubenswrapper[4926]: I1122 10:41:00.022560 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:00 crc kubenswrapper[4926]: I1122 10:41:00.022648 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:00 crc kubenswrapper[4926]: I1122 10:41:00.022664 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:00 crc kubenswrapper[4926]: I1122 10:41:00.022686 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:00 crc kubenswrapper[4926]: I1122 10:41:00.022703 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:00Z","lastTransitionTime":"2025-11-22T10:41:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:41:00 crc kubenswrapper[4926]: I1122 10:41:00.126406 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:00 crc kubenswrapper[4926]: I1122 10:41:00.126466 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:00 crc kubenswrapper[4926]: I1122 10:41:00.126483 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:00 crc kubenswrapper[4926]: I1122 10:41:00.126508 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:00 crc kubenswrapper[4926]: I1122 10:41:00.126525 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:00Z","lastTransitionTime":"2025-11-22T10:41:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:41:00 crc kubenswrapper[4926]: I1122 10:41:00.229935 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:00 crc kubenswrapper[4926]: I1122 10:41:00.229985 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:00 crc kubenswrapper[4926]: I1122 10:41:00.230001 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:00 crc kubenswrapper[4926]: I1122 10:41:00.230024 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:00 crc kubenswrapper[4926]: I1122 10:41:00.230043 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:00Z","lastTransitionTime":"2025-11-22T10:41:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:41:00 crc kubenswrapper[4926]: I1122 10:41:00.333033 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:00 crc kubenswrapper[4926]: I1122 10:41:00.333109 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:00 crc kubenswrapper[4926]: I1122 10:41:00.333132 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:00 crc kubenswrapper[4926]: I1122 10:41:00.333156 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:00 crc kubenswrapper[4926]: I1122 10:41:00.333173 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:00Z","lastTransitionTime":"2025-11-22T10:41:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:41:00 crc kubenswrapper[4926]: I1122 10:41:00.436646 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:00 crc kubenswrapper[4926]: I1122 10:41:00.436804 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:00 crc kubenswrapper[4926]: I1122 10:41:00.436837 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:00 crc kubenswrapper[4926]: I1122 10:41:00.436865 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:00 crc kubenswrapper[4926]: I1122 10:41:00.436932 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:00Z","lastTransitionTime":"2025-11-22T10:41:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:41:00 crc kubenswrapper[4926]: I1122 10:41:00.540071 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:00 crc kubenswrapper[4926]: I1122 10:41:00.540113 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:00 crc kubenswrapper[4926]: I1122 10:41:00.540123 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:00 crc kubenswrapper[4926]: I1122 10:41:00.540138 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:00 crc kubenswrapper[4926]: I1122 10:41:00.540149 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:00Z","lastTransitionTime":"2025-11-22T10:41:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:41:00 crc kubenswrapper[4926]: I1122 10:41:00.581003 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 10:41:00 crc kubenswrapper[4926]: I1122 10:41:00.581027 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 22 10:41:00 crc kubenswrapper[4926]: E1122 10:41:00.581140 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 22 10:41:00 crc kubenswrapper[4926]: I1122 10:41:00.581198 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-jfbf4" Nov 22 10:41:00 crc kubenswrapper[4926]: E1122 10:41:00.581384 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-jfbf4" podUID="c42b6f47-b1a4-4fee-8681-3b5288370323" Nov 22 10:41:00 crc kubenswrapper[4926]: E1122 10:41:00.581448 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 22 10:41:00 crc kubenswrapper[4926]: I1122 10:41:00.598291 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8bc26a84-5939-4708-97f7-36fb5b1d1f27\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bdc617bc3d316106b3e66df8521ae9165a90df081b92c4a6c167895006a667a4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8bdb9d01c359b36d5ea1517197ab1db0746267b371b5aaf6ae9af7157af6f76\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f92968d6a164ebcee515ead5d8704ab04430b65f29cec21c10cecf2b99079737\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-
pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://99cddd2a619c5ea76699afd981ea6b0ccab8df382e64f7f9b5399ac9091da32a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e4845e8b99f2def354e4a055d407495a578db4994aef6fb1c70b91760bbf4e2a\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"message\\\":\\\"le observer\\\\nW1122 10:40:01.221686 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1122 10:40:01.221835 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1122 10:40:01.223195 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3742639324/tls.crt::/tmp/serving-cert-3742639324/tls.key\\\\\\\"\\\\nI1122 10:40:01.845350 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1122 10:40:01.848213 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1122 10:40:01.848231 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1122 10:40:01.848251 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1122 10:40:01.848258 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1122 10:40:01.855383 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1122 10:40:01.855412 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1122 10:40:01.855418 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1122 10:40:01.855423 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nI1122 10:40:01.855441 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1122 10:40:01.855456 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1122 10:40:01.855461 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1122 10:40:01.855464 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1122 10:40:01.857125 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T10:39:55Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://59001d5969924d417da69f7b385e8ee904bb5289914c6f4a58156a80f2553abd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:43Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e4fdad471fe7d7332fa8c1fc1e4779b934b140f9c808e0af44f05808c56b3e42\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e4fdad471fe7d7332fa8c1fc1e4779b934b140f9c808e0af44f05808c56b3e42\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:39:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:39:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:39:40Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:41:00Z is after 2025-08-24T17:21:41Z" Nov 22 10:41:00 crc kubenswrapper[4926]: I1122 10:41:00.615780 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"000da1af-72cf-48f8-a5f6-ba658a30f2f2\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cb848b9bd67cb4ffab830dcdca81d4ddcc4fdbdda65202dbfd4632f8fc0bdfc5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a34606ee4262c2a39b7e799d969d8a76024895265fba033ffdbbd036c9e80fe3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0b5507d01d4acb2f96c9c8b4978d67033fe6e9f01a035089649879219c4e9902\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6ec673ef3eadee493ad9112c2b920087ff03986cb55933071e6be31f987ff8b1\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6ec673ef3eadee493ad9112c2b920087ff03986cb55933071e6be31f987ff8b1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:39:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:39:41Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:39:40Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:41:00Z is after 2025-08-24T17:21:41Z" Nov 22 10:41:00 crc kubenswrapper[4926]: I1122 10:41:00.630313 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:41:00Z is after 2025-08-24T17:21:41Z" Nov 22 10:41:00 crc kubenswrapper[4926]: I1122 10:41:00.642927 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:00 crc kubenswrapper[4926]: I1122 10:41:00.642980 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:00 crc kubenswrapper[4926]: I1122 10:41:00.642997 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:00 crc kubenswrapper[4926]: I1122 10:41:00.643021 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:00 crc kubenswrapper[4926]: I1122 10:41:00.643039 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:00Z","lastTransitionTime":"2025-11-22T10:41:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:41:00 crc kubenswrapper[4926]: I1122 10:41:00.645644 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d4977b14-85c3-4141-9b15-1768f09e8d27\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7f4461b3d09c647d71476b2c089ab47e93fe0e0efb27f8179e476ff3667447f6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5nwn7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fb54559825bae40317f4dbc23c5b4238c4681f9845053656d59b2d2b3e504e1a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5nwn7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:00Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-xr9nd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:41:00Z is after 2025-08-24T17:21:41Z" Nov 22 10:41:00 crc kubenswrapper[4926]: I1122 10:41:00.667963 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-sr572" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fa95257d-7464-4038-b2f3-aa795e4ac425\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b2dd6b2a2fe640f28f973df40cf7a31e3cba962a0d2aec796c6fcfa3892adce1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0a7d49a66e96b6f535cae7cd657acf64164c67b52df055b133183ef25644863d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0a7d49a66e96b6f535cae7cd657acf64164c67b52df055b133183ef25644863d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"read
Only\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://53833aaa04a836cea1e6539fd089b91d92bec1e9c1037d036a09fa83a95b45da\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://53833aaa04a836cea1e6539fd089b91d92bec1e9c1037d036a09fa83a95b45da\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://80eeb7fb332eda9b9472092de548160297c4abdf76a929904fcf3321e992c230\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://80eeb7fb332eda9b9472092de548160297c4abdf76a929904fcf3321e992c230\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:40:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6293e4f193241ddbe2b4e93ceee670330e9e6cf86889fc9b049a7195da496a28\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6293e4f193241ddbe2b4e93ceee670330e9e6cf86889fc9b049a7195da496a28\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:40:05Z\\\",\\\"reason\\\":\\\"Completed
\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6ffd06bbdeee7c23dde48a03a975b5a86b258d8f95281abd78c8ce459155bb3f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6ffd06bbdeee7c23dde48a03a975b5a86b258d8f95281abd78c8ce459155bb3f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:40:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ff5dbca8ebeaee6449c5eadf548e32151cc59875e405a574c1f2e0677b40bb19\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ff5dbca8ebeaee6449c5eadf548e32151cc59875e405a574c1f2e0677b40bb19\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:40:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:00Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-sr572\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:41:00Z is after 
2025-08-24T17:21:41Z" Nov 22 10:41:00 crc kubenswrapper[4926]: I1122 10:41:00.691066 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-jfbf4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c42b6f47-b1a4-4fee-8681-3b5288370323\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:14Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:14Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:14Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cddxh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cddxh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:14Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-jfbf4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:41:00Z is after 2025-08-24T17:21:41Z" Nov 22 10:41:00 crc kubenswrapper[4926]: I1122 10:41:00.715179 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1f3bdd4c-e5c5-46f1-8f92-450d892ef2e2\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b68f6c08d1ac65e51054d798dcb1e6affbf5f114ea3848b544d3093168e44150\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://adceb29cfdba4f70773eb38f81b2291080a8a25e6d6ffe7fbbb037118e0ac29f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://29acb771444281287208f7c5aecfe78ed6e70fe2940be31d423c011878bee6c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c37c77ac89f0469ff68b3d5dffc2af7d7b6997b
8975abe244e302baf0f5bbacc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ed65ad0463b5aa72d13756891af20691bff705a94c67a55d45f71af2a88b01e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2f4c96d7281eda14ad3971ee27f9f0f401d12ba8b72e32b65bd9d4935182f980\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2f4c96d7281eda14ad3971ee27f9f0f401d12ba8b72e32b65bd9d4935182f980\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:39:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:39:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ebad6d17a869f5f58a7c43300ceace96025b58ca9621cb6bc23b45d69fe471b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ebad6d17a869f5f58a7c43300ceace96025b58ca9621cb6bc23b45d69fe471b4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:39:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:39:42Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://6075bb148352742b787b1e8df58fe76d06db89303a7c297d07446148532e92cb\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6075bb148352742b787b1e8df58fe76d06db89303a7c297d07446148532e92cb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:39:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:39:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:39:40Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:41:00Z is after 2025-08-24T17:21:41Z" Nov 22 10:41:00 crc kubenswrapper[4926]: I1122 10:41:00.727496 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://20ee112074738980326d231b1680aa74c920ec797dbfe4752ebfdfb99695caeb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0a5e6c7d3f4dd4bbfdc58c50672c5187d32b136fb9e096c95e786193e1c773b9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36
cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:41:00Z is after 2025-08-24T17:21:41Z" Nov 22 10:41:00 crc kubenswrapper[4926]: I1122 10:41:00.745667 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:00 crc kubenswrapper[4926]: I1122 10:41:00.747237 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:00 crc kubenswrapper[4926]: I1122 10:41:00.747433 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:00 crc kubenswrapper[4926]: I1122 10:41:00.747505 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:00 crc kubenswrapper[4926]: I1122 10:41:00.747561 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:00Z","lastTransitionTime":"2025-11-22T10:41:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:41:00 crc kubenswrapper[4926]: I1122 10:41:00.748407 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-z69nr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"25bc94bb-a5d1-431c-9847-2f6a02997e25\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://20393151c13ddc03e23c32092c70e09c99527d0dd7554c02fc9d2ef8f6e38ba4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://877601ea3a274df873ed3165ae9f9e407093527a55ca10bb9a1c6092312c1b58\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\
":\\\"cri-o://ecaafcef7f92f85be04341712725da108f6dc00c79d4fb72f683186ccaa7c7d7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f2e1c170bdf63c83df5049fbe46ef4de6e08838e961ed3c16904cb9572fdf52f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4dd9889bb8600e958757f70ba92e2e7742f9f4e8ce9bf9c11dc4a2fd4ba0aaa2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f647a7091a4d0acbf116e2583851b13ec4415004447202e892f2b24b7acf5e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.i
o/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b2e1039227d0efdfdc18a3ec2d9bac78d747c80c3ae8c0a78b3cef27bae4245f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b2e1039227d0efdfdc18a3ec2d9bac78d747c80c3ae8c0a78b3cef27bae4245f\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-22T10:40:57Z\\\",\\\"message\\\":\\\"k-operator-58b4c7f79c-55gtf openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-cqsd2 openshift-ovn-kubernetes/ovnkube-node-z69nr openshift-dns/node-resolver-wqf9b openshift-kube-scheduler/openshift-kube-scheduler-crc openshift-multus/multus-additional-cni-plugins-sr572]\\\\nI1122 10:40:57.541881 6924 obj_retry.go:418] Waiting for all the *v1.Pod retry setup to complete in iterateRetryResources\\\\nI1122 10:40:57.541909 6924 obj_retry.go:303] Retry object setup: *v1.Pod openshift-multus/multus-additional-cni-plugins-sr572\\\\nI1122 10:40:57.541916 6924 obj_retry.go:365] Adding new object: *v1.Pod openshift-multus/multus-additional-cni-plugins-sr572\\\\nI1122 10:40:57.541923 6924 ovn.go:134] Ensuring zone local for Pod openshift-multus/multus-additional-cni-plugins-sr572 in node crc\\\\nI1122 10:40:57.541929 6924 obj_retry.go:386] Retry successful for *v1.Pod openshift-multus/multus-additional-cni-plugins-sr572 after 0 failed attempt(s)\\\\nI1122 10:40:57.541933 6924 default_network_controller.go:776] Recording success event on pod openshift-multus/multus-additional-cni-plugins-sr572\\\\nI1122 10:40:57.541944 6924 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1122 10:40:57.541989 6924 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:56Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 40s restarting failed container=ovnkube-controller 
pod=ovnkube-node-z69nr_openshift-ovn-kubernetes(25bc94bb-a5d1-431c-9847-2f6a02997e25)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://19bceeb84e2cce918b75507fb0f9d5e4365fcd0d30da4491b83ed54b3c2d369a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fde011e559f073f261a07a3e55c05d88d1b11a16b6858b74cb8ef79b97c266b1\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fde011e559f073f261a07a3e55c05d88d1b11a16b6858b74cb8ef79b97c266b1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:00Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-z69nr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:41:00Z is after 2025-08-24T17:21:41Z" Nov 22 10:41:00 crc kubenswrapper[4926]: I1122 10:41:00.763386 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-cqsd2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"40323bfa-0937-4581-a5f5-bbfe1de066eb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ed82455ff9e2721a5688857be775b66c293c17713ae1ce362da8eca9a0c52c24\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w5t6w
\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2335c0518813a704a36b683366f4b63c953afc0187416234b6682532298f80ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w5t6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:12Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-cqsd2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:41:00Z is after 2025-08-24T17:21:41Z" Nov 22 10:41:00 crc kubenswrapper[4926]: I1122 10:41:00.776633 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://136423f9eeb872be0b685dcfee2a81e5f4133ad09607dc53fdfa64d70e87ceab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:41:00Z is after 2025-08-24T17:21:41Z" Nov 22 10:41:00 crc kubenswrapper[4926]: I1122 10:41:00.789040 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-c6w2q" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"36de2843-6491-4c54-b624-c4a3d328c164\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://954a6a1daad88733da68073d0f235894eb5931360545e07bc25ca2cbedf0efae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://13dff3bd18b5adc45fb92f8207a6726011752f430271dabbae9f2e94011f1b08\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-22T10:40:48Z\\\",\\\"message\\\":\\\"2025-11-22T10:40:02+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_9f86c420-ab6e-4737-b325-6c2a7026407f\\\\n2025-11-22T10:40:02+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_9f86c420-ab6e-4737-b325-6c2a7026407f to /host/opt/cni/bin/\\\\n2025-11-22T10:40:03Z [verbose] multus-daemon started\\\\n2025-11-22T10:40:03Z [verbose] Readiness Indicator file check\\\\n2025-11-22T10:40:48Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-52vzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:00Z\\\"}}\" for pod \"openshift-multus\"/\"multus-c6w2q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:41:00Z is after 2025-08-24T17:21:41Z" Nov 22 10:41:00 crc kubenswrapper[4926]: I1122 10:41:00.800139 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"19e2ae79-41cf-4653-aa92-cc410145852b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5b4679473c2c11fecbd8496e1cbeb15b3a2e3fff5e5b5f127e0d1f1d7b3e5b70\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8066b411b165bb6f3aa9fac7c4f7d5687e58fba56ff2afd14c7a351ba3877483\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://96a716433f83f8041fc5f29383937a1b49d73701286739632a5c07a7e4a42077\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ff433d4efb9f77a47f79a39aa609b09a40a642f554e19b8fce0c84ad96a5e34\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:39:40Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:41:00Z is after 2025-08-24T17:21:41Z" Nov 22 10:41:00 crc kubenswrapper[4926]: I1122 10:41:00.809994 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-wqf9b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"385427b5-fb45-42dd-8ff8-a4c7cdad6157\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://06207f52a9095cf1830a6ad91952c7efa774aa15a2fca873dc09b6b995e26401\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4rrk8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\"
:[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:00Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-wqf9b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:41:00Z is after 2025-08-24T17:21:41Z" Nov 22 10:41:00 crc kubenswrapper[4926]: I1122 10:41:00.823728 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2bff8ee1644ff7cd9bee279e5c641b5d9c55e0546fbc7d1f2330e4e86aee0c37\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:41:00Z is after 2025-08-24T17:21:41Z" Nov 22 10:41:00 crc kubenswrapper[4926]: I1122 10:41:00.835027 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:41:00Z is after 2025-08-24T17:21:41Z" Nov 22 10:41:00 crc kubenswrapper[4926]: I1122 10:41:00.849339 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:41:00Z is after 2025-08-24T17:21:41Z" Nov 22 10:41:00 crc kubenswrapper[4926]: I1122 10:41:00.849494 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:00 crc kubenswrapper[4926]: I1122 10:41:00.849529 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:00 crc kubenswrapper[4926]: I1122 10:41:00.849537 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:00 crc kubenswrapper[4926]: I1122 10:41:00.849553 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:00 crc kubenswrapper[4926]: I1122 10:41:00.849563 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:00Z","lastTransitionTime":"2025-11-22T10:41:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:41:00 crc kubenswrapper[4926]: I1122 10:41:00.860133 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-4sppr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f66b576d-83a6-4919-a918-7b075f35881e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a4a164ad2484a9c0b09c8e6b174448dedfe84a017ada69b19429d90621540080\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jwtrf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:01Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-4sppr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:41:00Z is after 2025-08-24T17:21:41Z" Nov 22 10:41:00 crc kubenswrapper[4926]: I1122 10:41:00.953057 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:00 crc kubenswrapper[4926]: I1122 10:41:00.953103 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:00 crc kubenswrapper[4926]: I1122 10:41:00.953115 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:00 crc kubenswrapper[4926]: I1122 10:41:00.953131 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:00 crc kubenswrapper[4926]: I1122 10:41:00.953143 4926 setters.go:603] "Node became not 
ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:00Z","lastTransitionTime":"2025-11-22T10:41:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:41:01 crc kubenswrapper[4926]: I1122 10:41:01.055836 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:01 crc kubenswrapper[4926]: I1122 10:41:01.055878 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:01 crc kubenswrapper[4926]: I1122 10:41:01.055995 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:01 crc kubenswrapper[4926]: I1122 10:41:01.056011 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:01 crc kubenswrapper[4926]: I1122 10:41:01.056020 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:01Z","lastTransitionTime":"2025-11-22T10:41:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:41:01 crc kubenswrapper[4926]: I1122 10:41:01.159738 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:01 crc kubenswrapper[4926]: I1122 10:41:01.159807 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:01 crc kubenswrapper[4926]: I1122 10:41:01.159824 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:01 crc kubenswrapper[4926]: I1122 10:41:01.159931 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:01 crc kubenswrapper[4926]: I1122 10:41:01.159967 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:01Z","lastTransitionTime":"2025-11-22T10:41:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:41:01 crc kubenswrapper[4926]: I1122 10:41:01.263049 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:01 crc kubenswrapper[4926]: I1122 10:41:01.263127 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:01 crc kubenswrapper[4926]: I1122 10:41:01.263153 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:01 crc kubenswrapper[4926]: I1122 10:41:01.263183 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:01 crc kubenswrapper[4926]: I1122 10:41:01.263204 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:01Z","lastTransitionTime":"2025-11-22T10:41:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:41:01 crc kubenswrapper[4926]: I1122 10:41:01.366656 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:01 crc kubenswrapper[4926]: I1122 10:41:01.366732 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:01 crc kubenswrapper[4926]: I1122 10:41:01.366759 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:01 crc kubenswrapper[4926]: I1122 10:41:01.366789 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:01 crc kubenswrapper[4926]: I1122 10:41:01.366811 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:01Z","lastTransitionTime":"2025-11-22T10:41:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:41:01 crc kubenswrapper[4926]: I1122 10:41:01.470195 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:01 crc kubenswrapper[4926]: I1122 10:41:01.470263 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:01 crc kubenswrapper[4926]: I1122 10:41:01.470287 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:01 crc kubenswrapper[4926]: I1122 10:41:01.470319 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:01 crc kubenswrapper[4926]: I1122 10:41:01.470344 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:01Z","lastTransitionTime":"2025-11-22T10:41:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:41:01 crc kubenswrapper[4926]: I1122 10:41:01.528218 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:01 crc kubenswrapper[4926]: I1122 10:41:01.528281 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:01 crc kubenswrapper[4926]: I1122 10:41:01.528302 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:01 crc kubenswrapper[4926]: I1122 10:41:01.528328 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:01 crc kubenswrapper[4926]: I1122 10:41:01.528346 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:01Z","lastTransitionTime":"2025-11-22T10:41:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:41:01 crc kubenswrapper[4926]: E1122 10:41:01.551693 4926 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404548Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865348Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:41:01Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:41:01Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:41:01Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:41:01Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:41:01Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:41:01Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:41:01Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:41:01Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"aeb560ef-1eb5-4732-98e3-250b723cbd1b\\\",\\\"systemUUID\\\":\\\"16dcb71b-1e1f-4d77-bb74-17aa213c9052\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:41:01Z is after 
2025-08-24T17:21:41Z" Nov 22 10:41:01 crc kubenswrapper[4926]: I1122 10:41:01.556614 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:01 crc kubenswrapper[4926]: I1122 10:41:01.556645 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:01 crc kubenswrapper[4926]: I1122 10:41:01.556653 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:01 crc kubenswrapper[4926]: I1122 10:41:01.556667 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:01 crc kubenswrapper[4926]: I1122 10:41:01.556677 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:01Z","lastTransitionTime":"2025-11-22T10:41:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:41:01 crc kubenswrapper[4926]: E1122 10:41:01.574524 4926 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404548Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865348Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:41:01Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:41:01Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:41:01Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:41:01Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:41:01Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:41:01Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:41:01Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:41:01Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"aeb560ef-1eb5-4732-98e3-250b723cbd1b\\\",\\\"systemUUID\\\":\\\"16dcb71b-1e1f-4d77-bb74-17aa213c9052\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:41:01Z is after 
2025-08-24T17:21:41Z" Nov 22 10:41:01 crc kubenswrapper[4926]: I1122 10:41:01.579291 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:01 crc kubenswrapper[4926]: I1122 10:41:01.579339 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:01 crc kubenswrapper[4926]: I1122 10:41:01.579354 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:01 crc kubenswrapper[4926]: I1122 10:41:01.579380 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:01 crc kubenswrapper[4926]: I1122 10:41:01.579396 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:01Z","lastTransitionTime":"2025-11-22T10:41:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:41:01 crc kubenswrapper[4926]: I1122 10:41:01.581262 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 22 10:41:01 crc kubenswrapper[4926]: E1122 10:41:01.581485 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 22 10:41:01 crc kubenswrapper[4926]: E1122 10:41:01.595477 4926 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404548Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865348Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:41:01Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:41:01Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:41:01Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:41:01Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:41:01Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:41:01Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:41:01Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:41:01Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false 
reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-
art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed2
1\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"aeb560ef-1eb5-4732-98e3-250b723cbd1b\\\",\\\"systemUUID\\\":\\\"16dcb71b-1e1f-4d77-bb74-17aa213c9052\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: 
failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:41:01Z is after 2025-08-24T17:21:41Z" Nov 22 10:41:01 crc kubenswrapper[4926]: I1122 10:41:01.600377 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:01 crc kubenswrapper[4926]: I1122 10:41:01.600467 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:01 crc kubenswrapper[4926]: I1122 10:41:01.600502 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:01 crc kubenswrapper[4926]: I1122 10:41:01.600538 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:01 crc kubenswrapper[4926]: I1122 10:41:01.600560 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:01Z","lastTransitionTime":"2025-11-22T10:41:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:41:01 crc kubenswrapper[4926]: E1122 10:41:01.626397 4926 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404548Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865348Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:41:01Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:41:01Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:41:01Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:41:01Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:41:01Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:41:01Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:41:01Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:41:01Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"aeb560ef-1eb5-4732-98e3-250b723cbd1b\\\",\\\"systemUUID\\\":\\\"16dcb71b-1e1f-4d77-bb74-17aa213c9052\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:41:01Z is after 
2025-08-24T17:21:41Z" Nov 22 10:41:01 crc kubenswrapper[4926]: I1122 10:41:01.631277 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:01 crc kubenswrapper[4926]: I1122 10:41:01.631359 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:01 crc kubenswrapper[4926]: I1122 10:41:01.631440 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:01 crc kubenswrapper[4926]: I1122 10:41:01.631466 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:01 crc kubenswrapper[4926]: I1122 10:41:01.631492 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:01Z","lastTransitionTime":"2025-11-22T10:41:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:41:01 crc kubenswrapper[4926]: E1122 10:41:01.646634 4926 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404548Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865348Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:41:01Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:41:01Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:41:01Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:41:01Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:41:01Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:41:01Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:41:01Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:41:01Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"aeb560ef-1eb5-4732-98e3-250b723cbd1b\\\",\\\"systemUUID\\\":\\\"16dcb71b-1e1f-4d77-bb74-17aa213c9052\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:41:01Z is after 
2025-08-24T17:21:41Z" Nov 22 10:41:01 crc kubenswrapper[4926]: E1122 10:41:01.646869 4926 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 22 10:41:01 crc kubenswrapper[4926]: I1122 10:41:01.649526 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:01 crc kubenswrapper[4926]: I1122 10:41:01.649563 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:01 crc kubenswrapper[4926]: I1122 10:41:01.649574 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:01 crc kubenswrapper[4926]: I1122 10:41:01.649591 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:01 crc kubenswrapper[4926]: I1122 10:41:01.649604 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:01Z","lastTransitionTime":"2025-11-22T10:41:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:41:01 crc kubenswrapper[4926]: I1122 10:41:01.752525 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:01 crc kubenswrapper[4926]: I1122 10:41:01.752595 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:01 crc kubenswrapper[4926]: I1122 10:41:01.752614 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:01 crc kubenswrapper[4926]: I1122 10:41:01.752639 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:01 crc kubenswrapper[4926]: I1122 10:41:01.752661 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:01Z","lastTransitionTime":"2025-11-22T10:41:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:41:01 crc kubenswrapper[4926]: I1122 10:41:01.856363 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:01 crc kubenswrapper[4926]: I1122 10:41:01.856433 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:01 crc kubenswrapper[4926]: I1122 10:41:01.856450 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:01 crc kubenswrapper[4926]: I1122 10:41:01.856473 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:01 crc kubenswrapper[4926]: I1122 10:41:01.856493 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:01Z","lastTransitionTime":"2025-11-22T10:41:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:41:01 crc kubenswrapper[4926]: I1122 10:41:01.960200 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:01 crc kubenswrapper[4926]: I1122 10:41:01.960289 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:01 crc kubenswrapper[4926]: I1122 10:41:01.960316 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:01 crc kubenswrapper[4926]: I1122 10:41:01.960347 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:01 crc kubenswrapper[4926]: I1122 10:41:01.960370 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:01Z","lastTransitionTime":"2025-11-22T10:41:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:41:02 crc kubenswrapper[4926]: I1122 10:41:02.063598 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:02 crc kubenswrapper[4926]: I1122 10:41:02.063647 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:02 crc kubenswrapper[4926]: I1122 10:41:02.063663 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:02 crc kubenswrapper[4926]: I1122 10:41:02.063685 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:02 crc kubenswrapper[4926]: I1122 10:41:02.063703 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:02Z","lastTransitionTime":"2025-11-22T10:41:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:41:02 crc kubenswrapper[4926]: I1122 10:41:02.166369 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:02 crc kubenswrapper[4926]: I1122 10:41:02.166444 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:02 crc kubenswrapper[4926]: I1122 10:41:02.166458 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:02 crc kubenswrapper[4926]: I1122 10:41:02.166477 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:02 crc kubenswrapper[4926]: I1122 10:41:02.166491 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:02Z","lastTransitionTime":"2025-11-22T10:41:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:41:02 crc kubenswrapper[4926]: I1122 10:41:02.269726 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:02 crc kubenswrapper[4926]: I1122 10:41:02.269777 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:02 crc kubenswrapper[4926]: I1122 10:41:02.269789 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:02 crc kubenswrapper[4926]: I1122 10:41:02.269809 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:02 crc kubenswrapper[4926]: I1122 10:41:02.269821 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:02Z","lastTransitionTime":"2025-11-22T10:41:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:41:02 crc kubenswrapper[4926]: I1122 10:41:02.373681 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:02 crc kubenswrapper[4926]: I1122 10:41:02.373739 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:02 crc kubenswrapper[4926]: I1122 10:41:02.373758 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:02 crc kubenswrapper[4926]: I1122 10:41:02.373783 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:02 crc kubenswrapper[4926]: I1122 10:41:02.373804 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:02Z","lastTransitionTime":"2025-11-22T10:41:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:41:02 crc kubenswrapper[4926]: I1122 10:41:02.477519 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:02 crc kubenswrapper[4926]: I1122 10:41:02.477588 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:02 crc kubenswrapper[4926]: I1122 10:41:02.477603 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:02 crc kubenswrapper[4926]: I1122 10:41:02.477628 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:02 crc kubenswrapper[4926]: I1122 10:41:02.477644 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:02Z","lastTransitionTime":"2025-11-22T10:41:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:41:02 crc kubenswrapper[4926]: I1122 10:41:02.581085 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-jfbf4" Nov 22 10:41:02 crc kubenswrapper[4926]: I1122 10:41:02.581169 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 10:41:02 crc kubenswrapper[4926]: I1122 10:41:02.581175 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 22 10:41:02 crc kubenswrapper[4926]: E1122 10:41:02.581246 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-jfbf4" podUID="c42b6f47-b1a4-4fee-8681-3b5288370323" Nov 22 10:41:02 crc kubenswrapper[4926]: I1122 10:41:02.581330 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:02 crc kubenswrapper[4926]: I1122 10:41:02.581356 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:02 crc kubenswrapper[4926]: E1122 10:41:02.581412 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 22 10:41:02 crc kubenswrapper[4926]: E1122 10:41:02.581512 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 22 10:41:02 crc kubenswrapper[4926]: I1122 10:41:02.581562 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:02 crc kubenswrapper[4926]: I1122 10:41:02.581587 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:02 crc kubenswrapper[4926]: I1122 10:41:02.581604 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:02Z","lastTransitionTime":"2025-11-22T10:41:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:41:02 crc kubenswrapper[4926]: I1122 10:41:02.684767 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:02 crc kubenswrapper[4926]: I1122 10:41:02.684810 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:02 crc kubenswrapper[4926]: I1122 10:41:02.684823 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:02 crc kubenswrapper[4926]: I1122 10:41:02.684839 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:02 crc kubenswrapper[4926]: I1122 10:41:02.684850 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:02Z","lastTransitionTime":"2025-11-22T10:41:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:41:02 crc kubenswrapper[4926]: I1122 10:41:02.787744 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:02 crc kubenswrapper[4926]: I1122 10:41:02.787795 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:02 crc kubenswrapper[4926]: I1122 10:41:02.787809 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:02 crc kubenswrapper[4926]: I1122 10:41:02.787826 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:02 crc kubenswrapper[4926]: I1122 10:41:02.787839 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:02Z","lastTransitionTime":"2025-11-22T10:41:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:41:02 crc kubenswrapper[4926]: I1122 10:41:02.890855 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:02 crc kubenswrapper[4926]: I1122 10:41:02.890951 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:02 crc kubenswrapper[4926]: I1122 10:41:02.890970 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:02 crc kubenswrapper[4926]: I1122 10:41:02.890994 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:02 crc kubenswrapper[4926]: I1122 10:41:02.891010 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:02Z","lastTransitionTime":"2025-11-22T10:41:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:41:02 crc kubenswrapper[4926]: I1122 10:41:02.994048 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:02 crc kubenswrapper[4926]: I1122 10:41:02.994081 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:02 crc kubenswrapper[4926]: I1122 10:41:02.994090 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:02 crc kubenswrapper[4926]: I1122 10:41:02.994102 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:02 crc kubenswrapper[4926]: I1122 10:41:02.994110 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:02Z","lastTransitionTime":"2025-11-22T10:41:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:41:03 crc kubenswrapper[4926]: I1122 10:41:03.096340 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:03 crc kubenswrapper[4926]: I1122 10:41:03.096399 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:03 crc kubenswrapper[4926]: I1122 10:41:03.096412 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:03 crc kubenswrapper[4926]: I1122 10:41:03.096430 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:03 crc kubenswrapper[4926]: I1122 10:41:03.096443 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:03Z","lastTransitionTime":"2025-11-22T10:41:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:41:03 crc kubenswrapper[4926]: I1122 10:41:03.199530 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:03 crc kubenswrapper[4926]: I1122 10:41:03.199597 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:03 crc kubenswrapper[4926]: I1122 10:41:03.199610 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:03 crc kubenswrapper[4926]: I1122 10:41:03.199623 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:03 crc kubenswrapper[4926]: I1122 10:41:03.199634 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:03Z","lastTransitionTime":"2025-11-22T10:41:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:41:03 crc kubenswrapper[4926]: I1122 10:41:03.303071 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:03 crc kubenswrapper[4926]: I1122 10:41:03.303134 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:03 crc kubenswrapper[4926]: I1122 10:41:03.303150 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:03 crc kubenswrapper[4926]: I1122 10:41:03.303169 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:03 crc kubenswrapper[4926]: I1122 10:41:03.303185 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:03Z","lastTransitionTime":"2025-11-22T10:41:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:41:03 crc kubenswrapper[4926]: I1122 10:41:03.406600 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:03 crc kubenswrapper[4926]: I1122 10:41:03.406679 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:03 crc kubenswrapper[4926]: I1122 10:41:03.406698 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:03 crc kubenswrapper[4926]: I1122 10:41:03.406722 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:03 crc kubenswrapper[4926]: I1122 10:41:03.406741 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:03Z","lastTransitionTime":"2025-11-22T10:41:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:41:03 crc kubenswrapper[4926]: I1122 10:41:03.509860 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:03 crc kubenswrapper[4926]: I1122 10:41:03.509956 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:03 crc kubenswrapper[4926]: I1122 10:41:03.509975 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:03 crc kubenswrapper[4926]: I1122 10:41:03.510001 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:03 crc kubenswrapper[4926]: I1122 10:41:03.510022 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:03Z","lastTransitionTime":"2025-11-22T10:41:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:41:03 crc kubenswrapper[4926]: I1122 10:41:03.581749 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 22 10:41:03 crc kubenswrapper[4926]: E1122 10:41:03.582064 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 22 10:41:03 crc kubenswrapper[4926]: I1122 10:41:03.613793 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:03 crc kubenswrapper[4926]: I1122 10:41:03.613842 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:03 crc kubenswrapper[4926]: I1122 10:41:03.613855 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:03 crc kubenswrapper[4926]: I1122 10:41:03.613879 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:03 crc kubenswrapper[4926]: I1122 10:41:03.613913 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:03Z","lastTransitionTime":"2025-11-22T10:41:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:41:03 crc kubenswrapper[4926]: I1122 10:41:03.716316 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:03 crc kubenswrapper[4926]: I1122 10:41:03.716379 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:03 crc kubenswrapper[4926]: I1122 10:41:03.716396 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:03 crc kubenswrapper[4926]: I1122 10:41:03.716420 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:03 crc kubenswrapper[4926]: I1122 10:41:03.716436 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:03Z","lastTransitionTime":"2025-11-22T10:41:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:41:03 crc kubenswrapper[4926]: I1122 10:41:03.819343 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:03 crc kubenswrapper[4926]: I1122 10:41:03.819416 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:03 crc kubenswrapper[4926]: I1122 10:41:03.819442 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:03 crc kubenswrapper[4926]: I1122 10:41:03.819472 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:03 crc kubenswrapper[4926]: I1122 10:41:03.819495 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:03Z","lastTransitionTime":"2025-11-22T10:41:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:41:03 crc kubenswrapper[4926]: I1122 10:41:03.923139 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:03 crc kubenswrapper[4926]: I1122 10:41:03.923214 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:03 crc kubenswrapper[4926]: I1122 10:41:03.923233 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:03 crc kubenswrapper[4926]: I1122 10:41:03.923260 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:03 crc kubenswrapper[4926]: I1122 10:41:03.923282 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:03Z","lastTransitionTime":"2025-11-22T10:41:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:41:04 crc kubenswrapper[4926]: I1122 10:41:04.026304 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:04 crc kubenswrapper[4926]: I1122 10:41:04.026363 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:04 crc kubenswrapper[4926]: I1122 10:41:04.026381 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:04 crc kubenswrapper[4926]: I1122 10:41:04.026403 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:04 crc kubenswrapper[4926]: I1122 10:41:04.026422 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:04Z","lastTransitionTime":"2025-11-22T10:41:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:41:04 crc kubenswrapper[4926]: I1122 10:41:04.129375 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:04 crc kubenswrapper[4926]: I1122 10:41:04.129417 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:04 crc kubenswrapper[4926]: I1122 10:41:04.129428 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:04 crc kubenswrapper[4926]: I1122 10:41:04.129445 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:04 crc kubenswrapper[4926]: I1122 10:41:04.129457 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:04Z","lastTransitionTime":"2025-11-22T10:41:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:41:04 crc kubenswrapper[4926]: I1122 10:41:04.232136 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:04 crc kubenswrapper[4926]: I1122 10:41:04.232191 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:04 crc kubenswrapper[4926]: I1122 10:41:04.232213 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:04 crc kubenswrapper[4926]: I1122 10:41:04.232242 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:04 crc kubenswrapper[4926]: I1122 10:41:04.232260 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:04Z","lastTransitionTime":"2025-11-22T10:41:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:41:04 crc kubenswrapper[4926]: I1122 10:41:04.287160 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 22 10:41:04 crc kubenswrapper[4926]: E1122 10:41:04.287373 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 10:42:08.287343902 +0000 UTC m=+148.588949229 (durationBeforeRetry 1m4s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 10:41:04 crc kubenswrapper[4926]: I1122 10:41:04.287453 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 10:41:04 crc kubenswrapper[4926]: I1122 10:41:04.287523 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 10:41:04 crc kubenswrapper[4926]: E1122 10:41:04.287608 4926 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 22 10:41:04 crc kubenswrapper[4926]: E1122 10:41:04.287688 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-22 10:42:08.287669782 +0000 UTC m=+148.589275099 (durationBeforeRetry 1m4s). 
Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 22 10:41:04 crc kubenswrapper[4926]: E1122 10:41:04.287692 4926 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 22 10:41:04 crc kubenswrapper[4926]: E1122 10:41:04.287753 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-22 10:42:08.287736914 +0000 UTC m=+148.589342231 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 22 10:41:04 crc kubenswrapper[4926]: I1122 10:41:04.334361 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:04 crc kubenswrapper[4926]: I1122 10:41:04.334419 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:04 crc kubenswrapper[4926]: I1122 10:41:04.334437 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:04 crc kubenswrapper[4926]: I1122 10:41:04.334463 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:04 crc kubenswrapper[4926]: I1122 10:41:04.334481 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:04Z","lastTransitionTime":"2025-11-22T10:41:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:41:04 crc kubenswrapper[4926]: I1122 10:41:04.388991 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 22 10:41:04 crc kubenswrapper[4926]: I1122 10:41:04.389089 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 22 10:41:04 crc kubenswrapper[4926]: E1122 10:41:04.389142 4926 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 22 10:41:04 crc kubenswrapper[4926]: E1122 10:41:04.389165 4926 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 22 10:41:04 crc kubenswrapper[4926]: E1122 10:41:04.389181 4926 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 22 10:41:04 crc kubenswrapper[4926]: E1122 10:41:04.389242 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-22 10:42:08.389223594 +0000 UTC m=+148.690828891 (durationBeforeRetry 1m4s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 22 10:41:04 crc kubenswrapper[4926]: E1122 10:41:04.389263 4926 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 22 10:41:04 crc kubenswrapper[4926]: E1122 10:41:04.389303 4926 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 22 10:41:04 crc kubenswrapper[4926]: E1122 10:41:04.389327 4926 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 22 10:41:04 crc kubenswrapper[4926]: E1122 10:41:04.389419 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-22 10:42:08.38939077 +0000 UTC m=+148.690996097 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 22 10:41:04 crc kubenswrapper[4926]: I1122 10:41:04.437946 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:04 crc kubenswrapper[4926]: I1122 10:41:04.438002 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:04 crc kubenswrapper[4926]: I1122 10:41:04.438013 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:04 crc kubenswrapper[4926]: I1122 10:41:04.438028 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:04 crc kubenswrapper[4926]: I1122 10:41:04.438038 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:04Z","lastTransitionTime":"2025-11-22T10:41:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:41:04 crc kubenswrapper[4926]: I1122 10:41:04.541097 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:04 crc kubenswrapper[4926]: I1122 10:41:04.541136 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:04 crc kubenswrapper[4926]: I1122 10:41:04.541147 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:04 crc kubenswrapper[4926]: I1122 10:41:04.541165 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:04 crc kubenswrapper[4926]: I1122 10:41:04.541177 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:04Z","lastTransitionTime":"2025-11-22T10:41:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:41:04 crc kubenswrapper[4926]: I1122 10:41:04.584458 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 10:41:04 crc kubenswrapper[4926]: E1122 10:41:04.584620 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 22 10:41:04 crc kubenswrapper[4926]: I1122 10:41:04.584875 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 22 10:41:04 crc kubenswrapper[4926]: I1122 10:41:04.585259 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-jfbf4" Nov 22 10:41:04 crc kubenswrapper[4926]: E1122 10:41:04.585286 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 22 10:41:04 crc kubenswrapper[4926]: E1122 10:41:04.585514 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-jfbf4" podUID="c42b6f47-b1a4-4fee-8681-3b5288370323" Nov 22 10:41:04 crc kubenswrapper[4926]: I1122 10:41:04.645261 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:04 crc kubenswrapper[4926]: I1122 10:41:04.645386 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:04 crc kubenswrapper[4926]: I1122 10:41:04.645408 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:04 crc kubenswrapper[4926]: I1122 10:41:04.645430 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:04 crc kubenswrapper[4926]: I1122 10:41:04.645446 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:04Z","lastTransitionTime":"2025-11-22T10:41:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:41:04 crc kubenswrapper[4926]: I1122 10:41:04.748999 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:04 crc kubenswrapper[4926]: I1122 10:41:04.749058 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:04 crc kubenswrapper[4926]: I1122 10:41:04.749075 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:04 crc kubenswrapper[4926]: I1122 10:41:04.749098 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:04 crc kubenswrapper[4926]: I1122 10:41:04.749117 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:04Z","lastTransitionTime":"2025-11-22T10:41:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:41:04 crc kubenswrapper[4926]: I1122 10:41:04.854482 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:04 crc kubenswrapper[4926]: I1122 10:41:04.854753 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:04 crc kubenswrapper[4926]: I1122 10:41:04.854767 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:04 crc kubenswrapper[4926]: I1122 10:41:04.854784 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:04 crc kubenswrapper[4926]: I1122 10:41:04.854798 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:04Z","lastTransitionTime":"2025-11-22T10:41:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:41:04 crc kubenswrapper[4926]: I1122 10:41:04.957820 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:04 crc kubenswrapper[4926]: I1122 10:41:04.957938 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:04 crc kubenswrapper[4926]: I1122 10:41:04.957974 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:04 crc kubenswrapper[4926]: I1122 10:41:04.958007 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:04 crc kubenswrapper[4926]: I1122 10:41:04.958028 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:04Z","lastTransitionTime":"2025-11-22T10:41:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:41:05 crc kubenswrapper[4926]: I1122 10:41:05.061042 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:05 crc kubenswrapper[4926]: I1122 10:41:05.061118 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:05 crc kubenswrapper[4926]: I1122 10:41:05.061142 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:05 crc kubenswrapper[4926]: I1122 10:41:05.061170 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:05 crc kubenswrapper[4926]: I1122 10:41:05.061190 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:05Z","lastTransitionTime":"2025-11-22T10:41:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:41:05 crc kubenswrapper[4926]: I1122 10:41:05.164511 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:05 crc kubenswrapper[4926]: I1122 10:41:05.164938 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:05 crc kubenswrapper[4926]: I1122 10:41:05.165104 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:05 crc kubenswrapper[4926]: I1122 10:41:05.165318 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:05 crc kubenswrapper[4926]: I1122 10:41:05.165467 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:05Z","lastTransitionTime":"2025-11-22T10:41:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:41:05 crc kubenswrapper[4926]: I1122 10:41:05.268938 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:05 crc kubenswrapper[4926]: I1122 10:41:05.269002 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:05 crc kubenswrapper[4926]: I1122 10:41:05.269019 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:05 crc kubenswrapper[4926]: I1122 10:41:05.269048 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:05 crc kubenswrapper[4926]: I1122 10:41:05.269065 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:05Z","lastTransitionTime":"2025-11-22T10:41:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:41:05 crc kubenswrapper[4926]: I1122 10:41:05.372238 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:05 crc kubenswrapper[4926]: I1122 10:41:05.372314 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:05 crc kubenswrapper[4926]: I1122 10:41:05.372385 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:05 crc kubenswrapper[4926]: I1122 10:41:05.372418 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:05 crc kubenswrapper[4926]: I1122 10:41:05.372435 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:05Z","lastTransitionTime":"2025-11-22T10:41:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:41:05 crc kubenswrapper[4926]: I1122 10:41:05.475076 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:05 crc kubenswrapper[4926]: I1122 10:41:05.475124 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:05 crc kubenswrapper[4926]: I1122 10:41:05.475135 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:05 crc kubenswrapper[4926]: I1122 10:41:05.475153 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:05 crc kubenswrapper[4926]: I1122 10:41:05.475164 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:05Z","lastTransitionTime":"2025-11-22T10:41:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:41:05 crc kubenswrapper[4926]: I1122 10:41:05.577319 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:05 crc kubenswrapper[4926]: I1122 10:41:05.577375 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:05 crc kubenswrapper[4926]: I1122 10:41:05.577394 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:05 crc kubenswrapper[4926]: I1122 10:41:05.577414 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:05 crc kubenswrapper[4926]: I1122 10:41:05.577426 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:05Z","lastTransitionTime":"2025-11-22T10:41:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:41:05 crc kubenswrapper[4926]: I1122 10:41:05.581758 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 22 10:41:05 crc kubenswrapper[4926]: E1122 10:41:05.581874 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 22 10:41:05 crc kubenswrapper[4926]: I1122 10:41:05.679690 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:05 crc kubenswrapper[4926]: I1122 10:41:05.679724 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:05 crc kubenswrapper[4926]: I1122 10:41:05.679732 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:05 crc kubenswrapper[4926]: I1122 10:41:05.679775 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:05 crc kubenswrapper[4926]: I1122 10:41:05.679784 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:05Z","lastTransitionTime":"2025-11-22T10:41:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:41:05 crc kubenswrapper[4926]: I1122 10:41:05.781582 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:05 crc kubenswrapper[4926]: I1122 10:41:05.781617 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:05 crc kubenswrapper[4926]: I1122 10:41:05.781625 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:05 crc kubenswrapper[4926]: I1122 10:41:05.781639 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:05 crc kubenswrapper[4926]: I1122 10:41:05.781647 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:05Z","lastTransitionTime":"2025-11-22T10:41:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:41:05 crc kubenswrapper[4926]: I1122 10:41:05.884315 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:05 crc kubenswrapper[4926]: I1122 10:41:05.884367 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:05 crc kubenswrapper[4926]: I1122 10:41:05.884385 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:05 crc kubenswrapper[4926]: I1122 10:41:05.884406 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:05 crc kubenswrapper[4926]: I1122 10:41:05.884422 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:05Z","lastTransitionTime":"2025-11-22T10:41:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:41:05 crc kubenswrapper[4926]: I1122 10:41:05.987548 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:05 crc kubenswrapper[4926]: I1122 10:41:05.987607 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:05 crc kubenswrapper[4926]: I1122 10:41:05.987624 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:05 crc kubenswrapper[4926]: I1122 10:41:05.987650 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:05 crc kubenswrapper[4926]: I1122 10:41:05.987669 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:05Z","lastTransitionTime":"2025-11-22T10:41:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:41:06 crc kubenswrapper[4926]: I1122 10:41:06.091025 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:06 crc kubenswrapper[4926]: I1122 10:41:06.091122 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:06 crc kubenswrapper[4926]: I1122 10:41:06.091143 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:06 crc kubenswrapper[4926]: I1122 10:41:06.091167 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:06 crc kubenswrapper[4926]: I1122 10:41:06.091186 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:06Z","lastTransitionTime":"2025-11-22T10:41:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:41:06 crc kubenswrapper[4926]: I1122 10:41:06.194416 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:06 crc kubenswrapper[4926]: I1122 10:41:06.194473 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:06 crc kubenswrapper[4926]: I1122 10:41:06.194494 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:06 crc kubenswrapper[4926]: I1122 10:41:06.194516 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:06 crc kubenswrapper[4926]: I1122 10:41:06.194532 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:06Z","lastTransitionTime":"2025-11-22T10:41:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:41:06 crc kubenswrapper[4926]: I1122 10:41:06.298101 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:06 crc kubenswrapper[4926]: I1122 10:41:06.298168 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:06 crc kubenswrapper[4926]: I1122 10:41:06.298181 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:06 crc kubenswrapper[4926]: I1122 10:41:06.298206 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:06 crc kubenswrapper[4926]: I1122 10:41:06.298225 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:06Z","lastTransitionTime":"2025-11-22T10:41:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:41:06 crc kubenswrapper[4926]: I1122 10:41:06.401137 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:06 crc kubenswrapper[4926]: I1122 10:41:06.401197 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:06 crc kubenswrapper[4926]: I1122 10:41:06.401218 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:06 crc kubenswrapper[4926]: I1122 10:41:06.401245 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:06 crc kubenswrapper[4926]: I1122 10:41:06.401265 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:06Z","lastTransitionTime":"2025-11-22T10:41:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:41:06 crc kubenswrapper[4926]: I1122 10:41:06.507846 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:06 crc kubenswrapper[4926]: I1122 10:41:06.507983 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:06 crc kubenswrapper[4926]: I1122 10:41:06.508006 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:06 crc kubenswrapper[4926]: I1122 10:41:06.508034 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:06 crc kubenswrapper[4926]: I1122 10:41:06.508060 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:06Z","lastTransitionTime":"2025-11-22T10:41:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:41:06 crc kubenswrapper[4926]: I1122 10:41:06.581549 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 22 10:41:06 crc kubenswrapper[4926]: I1122 10:41:06.581568 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-jfbf4" Nov 22 10:41:06 crc kubenswrapper[4926]: I1122 10:41:06.581720 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 10:41:06 crc kubenswrapper[4926]: E1122 10:41:06.581935 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 22 10:41:06 crc kubenswrapper[4926]: E1122 10:41:06.582058 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-jfbf4" podUID="c42b6f47-b1a4-4fee-8681-3b5288370323" Nov 22 10:41:06 crc kubenswrapper[4926]: E1122 10:41:06.582342 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 22 10:41:06 crc kubenswrapper[4926]: I1122 10:41:06.612224 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:06 crc kubenswrapper[4926]: I1122 10:41:06.612381 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:06 crc kubenswrapper[4926]: I1122 10:41:06.612409 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:06 crc kubenswrapper[4926]: I1122 10:41:06.612438 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:06 crc kubenswrapper[4926]: I1122 10:41:06.612461 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:06Z","lastTransitionTime":"2025-11-22T10:41:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:41:06 crc kubenswrapper[4926]: I1122 10:41:06.715410 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:06 crc kubenswrapper[4926]: I1122 10:41:06.715463 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:06 crc kubenswrapper[4926]: I1122 10:41:06.715479 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:06 crc kubenswrapper[4926]: I1122 10:41:06.715500 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:06 crc kubenswrapper[4926]: I1122 10:41:06.715517 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:06Z","lastTransitionTime":"2025-11-22T10:41:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:41:06 crc kubenswrapper[4926]: I1122 10:41:06.818384 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:06 crc kubenswrapper[4926]: I1122 10:41:06.818450 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:06 crc kubenswrapper[4926]: I1122 10:41:06.818471 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:06 crc kubenswrapper[4926]: I1122 10:41:06.818500 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:06 crc kubenswrapper[4926]: I1122 10:41:06.818519 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:06Z","lastTransitionTime":"2025-11-22T10:41:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:41:06 crc kubenswrapper[4926]: I1122 10:41:06.921647 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:06 crc kubenswrapper[4926]: I1122 10:41:06.921707 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:06 crc kubenswrapper[4926]: I1122 10:41:06.921725 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:06 crc kubenswrapper[4926]: I1122 10:41:06.921746 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:06 crc kubenswrapper[4926]: I1122 10:41:06.921763 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:06Z","lastTransitionTime":"2025-11-22T10:41:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:41:07 crc kubenswrapper[4926]: I1122 10:41:07.024469 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:07 crc kubenswrapper[4926]: I1122 10:41:07.024526 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:07 crc kubenswrapper[4926]: I1122 10:41:07.024548 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:07 crc kubenswrapper[4926]: I1122 10:41:07.024576 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:07 crc kubenswrapper[4926]: I1122 10:41:07.024597 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:07Z","lastTransitionTime":"2025-11-22T10:41:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:41:07 crc kubenswrapper[4926]: I1122 10:41:07.127259 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:07 crc kubenswrapper[4926]: I1122 10:41:07.127332 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:07 crc kubenswrapper[4926]: I1122 10:41:07.127350 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:07 crc kubenswrapper[4926]: I1122 10:41:07.127371 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:07 crc kubenswrapper[4926]: I1122 10:41:07.127387 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:07Z","lastTransitionTime":"2025-11-22T10:41:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:41:07 crc kubenswrapper[4926]: I1122 10:41:07.230046 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:07 crc kubenswrapper[4926]: I1122 10:41:07.230111 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:07 crc kubenswrapper[4926]: I1122 10:41:07.230134 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:07 crc kubenswrapper[4926]: I1122 10:41:07.230161 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:07 crc kubenswrapper[4926]: I1122 10:41:07.230184 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:07Z","lastTransitionTime":"2025-11-22T10:41:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:41:07 crc kubenswrapper[4926]: I1122 10:41:07.333346 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:07 crc kubenswrapper[4926]: I1122 10:41:07.333412 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:07 crc kubenswrapper[4926]: I1122 10:41:07.333436 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:07 crc kubenswrapper[4926]: I1122 10:41:07.333464 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:07 crc kubenswrapper[4926]: I1122 10:41:07.333487 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:07Z","lastTransitionTime":"2025-11-22T10:41:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:41:07 crc kubenswrapper[4926]: I1122 10:41:07.436161 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:07 crc kubenswrapper[4926]: I1122 10:41:07.436215 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:07 crc kubenswrapper[4926]: I1122 10:41:07.436233 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:07 crc kubenswrapper[4926]: I1122 10:41:07.436258 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:07 crc kubenswrapper[4926]: I1122 10:41:07.436278 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:07Z","lastTransitionTime":"2025-11-22T10:41:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:41:07 crc kubenswrapper[4926]: I1122 10:41:07.539583 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:07 crc kubenswrapper[4926]: I1122 10:41:07.539642 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:07 crc kubenswrapper[4926]: I1122 10:41:07.539661 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:07 crc kubenswrapper[4926]: I1122 10:41:07.539684 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:07 crc kubenswrapper[4926]: I1122 10:41:07.539702 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:07Z","lastTransitionTime":"2025-11-22T10:41:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:41:07 crc kubenswrapper[4926]: I1122 10:41:07.581553 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 22 10:41:07 crc kubenswrapper[4926]: E1122 10:41:07.582059 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 22 10:41:07 crc kubenswrapper[4926]: I1122 10:41:07.643337 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:07 crc kubenswrapper[4926]: I1122 10:41:07.643411 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:07 crc kubenswrapper[4926]: I1122 10:41:07.643430 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:07 crc kubenswrapper[4926]: I1122 10:41:07.643458 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:07 crc kubenswrapper[4926]: I1122 10:41:07.643478 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:07Z","lastTransitionTime":"2025-11-22T10:41:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:41:07 crc kubenswrapper[4926]: I1122 10:41:07.746869 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:07 crc kubenswrapper[4926]: I1122 10:41:07.746979 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:07 crc kubenswrapper[4926]: I1122 10:41:07.747003 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:07 crc kubenswrapper[4926]: I1122 10:41:07.747032 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:07 crc kubenswrapper[4926]: I1122 10:41:07.747055 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:07Z","lastTransitionTime":"2025-11-22T10:41:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:41:07 crc kubenswrapper[4926]: I1122 10:41:07.850630 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:07 crc kubenswrapper[4926]: I1122 10:41:07.850684 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:07 crc kubenswrapper[4926]: I1122 10:41:07.850699 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:07 crc kubenswrapper[4926]: I1122 10:41:07.850723 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:07 crc kubenswrapper[4926]: I1122 10:41:07.850740 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:07Z","lastTransitionTime":"2025-11-22T10:41:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:41:07 crc kubenswrapper[4926]: I1122 10:41:07.953835 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:07 crc kubenswrapper[4926]: I1122 10:41:07.953924 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:07 crc kubenswrapper[4926]: I1122 10:41:07.953941 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:07 crc kubenswrapper[4926]: I1122 10:41:07.953963 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:07 crc kubenswrapper[4926]: I1122 10:41:07.953980 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:07Z","lastTransitionTime":"2025-11-22T10:41:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:41:08 crc kubenswrapper[4926]: I1122 10:41:08.056684 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:08 crc kubenswrapper[4926]: I1122 10:41:08.056747 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:08 crc kubenswrapper[4926]: I1122 10:41:08.056764 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:08 crc kubenswrapper[4926]: I1122 10:41:08.056786 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:08 crc kubenswrapper[4926]: I1122 10:41:08.056806 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:08Z","lastTransitionTime":"2025-11-22T10:41:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:41:08 crc kubenswrapper[4926]: I1122 10:41:08.159929 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:08 crc kubenswrapper[4926]: I1122 10:41:08.160004 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:08 crc kubenswrapper[4926]: I1122 10:41:08.160028 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:08 crc kubenswrapper[4926]: I1122 10:41:08.160056 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:08 crc kubenswrapper[4926]: I1122 10:41:08.160079 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:08Z","lastTransitionTime":"2025-11-22T10:41:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:41:08 crc kubenswrapper[4926]: I1122 10:41:08.264578 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:08 crc kubenswrapper[4926]: I1122 10:41:08.264956 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:08 crc kubenswrapper[4926]: I1122 10:41:08.265174 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:08 crc kubenswrapper[4926]: I1122 10:41:08.265355 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:08 crc kubenswrapper[4926]: I1122 10:41:08.265527 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:08Z","lastTransitionTime":"2025-11-22T10:41:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:41:08 crc kubenswrapper[4926]: I1122 10:41:08.369395 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:08 crc kubenswrapper[4926]: I1122 10:41:08.369812 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:08 crc kubenswrapper[4926]: I1122 10:41:08.370027 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:08 crc kubenswrapper[4926]: I1122 10:41:08.370230 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:08 crc kubenswrapper[4926]: I1122 10:41:08.370377 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:08Z","lastTransitionTime":"2025-11-22T10:41:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:41:08 crc kubenswrapper[4926]: I1122 10:41:08.473958 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:08 crc kubenswrapper[4926]: I1122 10:41:08.474051 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:08 crc kubenswrapper[4926]: I1122 10:41:08.474083 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:08 crc kubenswrapper[4926]: I1122 10:41:08.474130 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:08 crc kubenswrapper[4926]: I1122 10:41:08.474156 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:08Z","lastTransitionTime":"2025-11-22T10:41:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:41:08 crc kubenswrapper[4926]: I1122 10:41:08.577855 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:08 crc kubenswrapper[4926]: I1122 10:41:08.579513 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:08 crc kubenswrapper[4926]: I1122 10:41:08.579676 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:08 crc kubenswrapper[4926]: I1122 10:41:08.579805 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:08 crc kubenswrapper[4926]: I1122 10:41:08.579965 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:08Z","lastTransitionTime":"2025-11-22T10:41:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:41:08 crc kubenswrapper[4926]: I1122 10:41:08.581347 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 10:41:08 crc kubenswrapper[4926]: I1122 10:41:08.581386 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 22 10:41:08 crc kubenswrapper[4926]: E1122 10:41:08.581526 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 22 10:41:08 crc kubenswrapper[4926]: E1122 10:41:08.581611 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 22 10:41:08 crc kubenswrapper[4926]: I1122 10:41:08.581671 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-jfbf4" Nov 22 10:41:08 crc kubenswrapper[4926]: E1122 10:41:08.581840 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-jfbf4" podUID="c42b6f47-b1a4-4fee-8681-3b5288370323" Nov 22 10:41:08 crc kubenswrapper[4926]: I1122 10:41:08.683025 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:08 crc kubenswrapper[4926]: I1122 10:41:08.683464 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:08 crc kubenswrapper[4926]: I1122 10:41:08.683644 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:08 crc kubenswrapper[4926]: I1122 10:41:08.683828 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:08 crc kubenswrapper[4926]: I1122 10:41:08.684074 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:08Z","lastTransitionTime":"2025-11-22T10:41:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:41:08 crc kubenswrapper[4926]: I1122 10:41:08.786833 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:08 crc kubenswrapper[4926]: I1122 10:41:08.786864 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:08 crc kubenswrapper[4926]: I1122 10:41:08.786874 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:08 crc kubenswrapper[4926]: I1122 10:41:08.786919 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:08 crc kubenswrapper[4926]: I1122 10:41:08.786938 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:08Z","lastTransitionTime":"2025-11-22T10:41:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:41:08 crc kubenswrapper[4926]: I1122 10:41:08.888653 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:08 crc kubenswrapper[4926]: I1122 10:41:08.888917 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:08 crc kubenswrapper[4926]: I1122 10:41:08.889005 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:08 crc kubenswrapper[4926]: I1122 10:41:08.889100 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:08 crc kubenswrapper[4926]: I1122 10:41:08.889176 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:08Z","lastTransitionTime":"2025-11-22T10:41:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:41:08 crc kubenswrapper[4926]: I1122 10:41:08.994072 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:08 crc kubenswrapper[4926]: I1122 10:41:08.994169 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:08 crc kubenswrapper[4926]: I1122 10:41:08.994200 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:08 crc kubenswrapper[4926]: I1122 10:41:08.994237 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:08 crc kubenswrapper[4926]: I1122 10:41:08.994276 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:08Z","lastTransitionTime":"2025-11-22T10:41:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:41:09 crc kubenswrapper[4926]: I1122 10:41:09.097748 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:09 crc kubenswrapper[4926]: I1122 10:41:09.097815 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:09 crc kubenswrapper[4926]: I1122 10:41:09.097830 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:09 crc kubenswrapper[4926]: I1122 10:41:09.097851 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:09 crc kubenswrapper[4926]: I1122 10:41:09.097863 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:09Z","lastTransitionTime":"2025-11-22T10:41:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:41:09 crc kubenswrapper[4926]: I1122 10:41:09.203081 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:09 crc kubenswrapper[4926]: I1122 10:41:09.203170 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:09 crc kubenswrapper[4926]: I1122 10:41:09.203182 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:09 crc kubenswrapper[4926]: I1122 10:41:09.203233 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:09 crc kubenswrapper[4926]: I1122 10:41:09.203248 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:09Z","lastTransitionTime":"2025-11-22T10:41:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:41:09 crc kubenswrapper[4926]: I1122 10:41:09.307331 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:09 crc kubenswrapper[4926]: I1122 10:41:09.307396 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:09 crc kubenswrapper[4926]: I1122 10:41:09.307414 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:09 crc kubenswrapper[4926]: I1122 10:41:09.307438 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:09 crc kubenswrapper[4926]: I1122 10:41:09.307458 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:09Z","lastTransitionTime":"2025-11-22T10:41:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:41:09 crc kubenswrapper[4926]: I1122 10:41:09.410618 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:09 crc kubenswrapper[4926]: I1122 10:41:09.410668 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:09 crc kubenswrapper[4926]: I1122 10:41:09.410685 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:09 crc kubenswrapper[4926]: I1122 10:41:09.410710 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:09 crc kubenswrapper[4926]: I1122 10:41:09.410727 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:09Z","lastTransitionTime":"2025-11-22T10:41:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:41:09 crc kubenswrapper[4926]: I1122 10:41:09.514162 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:09 crc kubenswrapper[4926]: I1122 10:41:09.514226 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:09 crc kubenswrapper[4926]: I1122 10:41:09.514244 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:09 crc kubenswrapper[4926]: I1122 10:41:09.514271 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:09 crc kubenswrapper[4926]: I1122 10:41:09.514292 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:09Z","lastTransitionTime":"2025-11-22T10:41:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:41:09 crc kubenswrapper[4926]: I1122 10:41:09.581334 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 22 10:41:09 crc kubenswrapper[4926]: E1122 10:41:09.581510 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 22 10:41:09 crc kubenswrapper[4926]: I1122 10:41:09.617596 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:09 crc kubenswrapper[4926]: I1122 10:41:09.617657 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:09 crc kubenswrapper[4926]: I1122 10:41:09.617673 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:09 crc kubenswrapper[4926]: I1122 10:41:09.617695 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:09 crc kubenswrapper[4926]: I1122 10:41:09.617713 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:09Z","lastTransitionTime":"2025-11-22T10:41:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:41:09 crc kubenswrapper[4926]: I1122 10:41:09.721118 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:09 crc kubenswrapper[4926]: I1122 10:41:09.721175 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:09 crc kubenswrapper[4926]: I1122 10:41:09.721200 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:09 crc kubenswrapper[4926]: I1122 10:41:09.721231 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:09 crc kubenswrapper[4926]: I1122 10:41:09.721253 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:09Z","lastTransitionTime":"2025-11-22T10:41:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:41:09 crc kubenswrapper[4926]: I1122 10:41:09.824513 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:09 crc kubenswrapper[4926]: I1122 10:41:09.824913 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:09 crc kubenswrapper[4926]: I1122 10:41:09.825118 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:09 crc kubenswrapper[4926]: I1122 10:41:09.825524 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:09 crc kubenswrapper[4926]: I1122 10:41:09.825702 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:09Z","lastTransitionTime":"2025-11-22T10:41:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:41:09 crc kubenswrapper[4926]: I1122 10:41:09.928909 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:09 crc kubenswrapper[4926]: I1122 10:41:09.929236 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:09 crc kubenswrapper[4926]: I1122 10:41:09.929327 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:09 crc kubenswrapper[4926]: I1122 10:41:09.929389 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:09 crc kubenswrapper[4926]: I1122 10:41:09.929450 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:09Z","lastTransitionTime":"2025-11-22T10:41:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:41:10 crc kubenswrapper[4926]: I1122 10:41:10.032421 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:10 crc kubenswrapper[4926]: I1122 10:41:10.032705 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:10 crc kubenswrapper[4926]: I1122 10:41:10.032861 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:10 crc kubenswrapper[4926]: I1122 10:41:10.033044 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:10 crc kubenswrapper[4926]: I1122 10:41:10.033127 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:10Z","lastTransitionTime":"2025-11-22T10:41:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:41:10 crc kubenswrapper[4926]: I1122 10:41:10.135704 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:10 crc kubenswrapper[4926]: I1122 10:41:10.135772 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:10 crc kubenswrapper[4926]: I1122 10:41:10.135797 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:10 crc kubenswrapper[4926]: I1122 10:41:10.135824 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:10 crc kubenswrapper[4926]: I1122 10:41:10.135845 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:10Z","lastTransitionTime":"2025-11-22T10:41:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:41:10 crc kubenswrapper[4926]: I1122 10:41:10.238828 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:10 crc kubenswrapper[4926]: I1122 10:41:10.239079 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:10 crc kubenswrapper[4926]: I1122 10:41:10.239117 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:10 crc kubenswrapper[4926]: I1122 10:41:10.239141 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:10 crc kubenswrapper[4926]: I1122 10:41:10.239158 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:10Z","lastTransitionTime":"2025-11-22T10:41:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:41:10 crc kubenswrapper[4926]: I1122 10:41:10.340929 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:10 crc kubenswrapper[4926]: I1122 10:41:10.340977 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:10 crc kubenswrapper[4926]: I1122 10:41:10.340991 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:10 crc kubenswrapper[4926]: I1122 10:41:10.341013 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:10 crc kubenswrapper[4926]: I1122 10:41:10.341030 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:10Z","lastTransitionTime":"2025-11-22T10:41:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:41:10 crc kubenswrapper[4926]: I1122 10:41:10.444335 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:10 crc kubenswrapper[4926]: I1122 10:41:10.444393 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:10 crc kubenswrapper[4926]: I1122 10:41:10.444452 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:10 crc kubenswrapper[4926]: I1122 10:41:10.444487 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:10 crc kubenswrapper[4926]: I1122 10:41:10.444509 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:10Z","lastTransitionTime":"2025-11-22T10:41:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:41:10 crc kubenswrapper[4926]: I1122 10:41:10.547230 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:10 crc kubenswrapper[4926]: I1122 10:41:10.547279 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:10 crc kubenswrapper[4926]: I1122 10:41:10.547297 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:10 crc kubenswrapper[4926]: I1122 10:41:10.547315 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:10 crc kubenswrapper[4926]: I1122 10:41:10.547330 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:10Z","lastTransitionTime":"2025-11-22T10:41:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:41:10 crc kubenswrapper[4926]: I1122 10:41:10.581303 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 22 10:41:10 crc kubenswrapper[4926]: E1122 10:41:10.581423 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 22 10:41:10 crc kubenswrapper[4926]: I1122 10:41:10.581303 4926 util.go:30] "No sandbox for pod can be found. 
Nov 22 10:41:10 crc kubenswrapper[4926]: E1122 10:41:10.581679 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 22 10:41:10 crc kubenswrapper[4926]: I1122 10:41:10.581698 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-jfbf4"
Nov 22 10:41:10 crc kubenswrapper[4926]: E1122 10:41:10.582101 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-jfbf4" podUID="c42b6f47-b1a4-4fee-8681-3b5288370323"
Nov 22 10:41:10 crc kubenswrapper[4926]: I1122 10:41:10.593921 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/kube-rbac-proxy-crio-crc"]
Nov 22 10:41:10 crc kubenswrapper[4926]: I1122 10:41:10.594811 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-wqf9b" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"385427b5-fb45-42dd-8ff8-a4c7cdad6157\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://06207f52a9095cf1830a6ad91952c7efa774aa15a2fca873dc09b6b995e26401\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4rrk8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:00Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-wqf9b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:41:10Z is after 2025-08-24T17:21:41Z"
Nov 22 10:41:10 crc kubenswrapper[4926]: I1122 10:41:10.615754 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2bff8ee1644ff7cd9bee279e5c641b5d9c55e0546fbc7d1f2330e4e86aee0c37\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:41:10Z is after 2025-08-24T17:21:41Z"
Nov 22 10:41:10 crc kubenswrapper[4926]: I1122 10:41:10.631384 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:41:10Z is after 2025-08-24T17:21:41Z"
Nov 22 10:41:10 crc kubenswrapper[4926]: I1122 10:41:10.646107 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:41:10Z is after 2025-08-24T17:21:41Z"
Nov 22 10:41:10 crc kubenswrapper[4926]: I1122 10:41:10.650144 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 10:41:10 crc kubenswrapper[4926]: I1122 10:41:10.650174 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 10:41:10 crc kubenswrapper[4926]: I1122 10:41:10.650182 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 10:41:10 crc kubenswrapper[4926]: I1122 10:41:10.650197 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 10:41:10 crc kubenswrapper[4926]: I1122 10:41:10.650205 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:10Z","lastTransitionTime":"2025-11-22T10:41:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 10:41:10 crc kubenswrapper[4926]: I1122 10:41:10.660269 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-4sppr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f66b576d-83a6-4919-a918-7b075f35881e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a4a164ad2484a9c0b09c8e6b174448dedfe84a017ada69b19429d90621540080\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jwtrf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:01Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-4sppr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:41:10Z is after 2025-08-24T17:21:41Z"
Nov 22 10:41:10 crc kubenswrapper[4926]: I1122 10:41:10.678821 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"19e2ae79-41cf-4653-aa92-cc410145852b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5b4679473c2c11fecbd8496e1cbeb15b3a2e3fff5e5b5f127e0d1f1d7b3e5b70\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8066b411b165bb6f3aa9fac7c4f7d5687e58fba56ff2afd14c7a351ba3877483\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://96a716433f83f8041fc5f29383937a1b49d73701286739632a5c07a7e4a42077\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ff433d4efb9f77a47f79a39aa609b09a40a642f554e19b8fce0c84ad96a5e34\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:39:40Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:41:10Z is after 2025-08-24T17:21:41Z"
Nov 22 10:41:10 crc kubenswrapper[4926]: I1122 10:41:10.697047 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"000da1af-72cf-48f8-a5f6-ba658a30f2f2\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cb848b9bd67cb4ffab830dcdca81d4ddcc4fdbdda65202dbfd4632f8fc0bdfc5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a34606ee4262c2a39b7e799d969d8a76024895265fba033ffdbbd036c9e80fe3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0b5507d01d4acb2f96c9c8b4978d67033fe6e9f01a035089649879219c4e9902\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6ec673ef3eadee493ad9112c2b920087ff03986cb55933071e6be31f987ff8b1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6ec673ef3eadee493ad9112c2b920087ff03986cb55933071e6be31f987ff8b1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:39:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:39:41Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:39:40Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:41:10Z is after 2025-08-24T17:21:41Z"
Nov 22 10:41:10 crc kubenswrapper[4926]: I1122 10:41:10.711175 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:41:10Z is after 2025-08-24T17:21:41Z"
Nov 22 10:41:10 crc kubenswrapper[4926]: I1122 10:41:10.725593 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d4977b14-85c3-4141-9b15-1768f09e8d27\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7f4461b3d09c647d71476b2c089ab47e93fe0e0efb27f8179e476ff3667447f6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5nwn7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fb54559825bae40317f4dbc23c5b4238c4681f9845053656d59b2d2b3e504e1a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5nwn7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:00Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-xr9nd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:41:10Z is after 2025-08-24T17:21:41Z"
Nov 22 10:41:10 crc kubenswrapper[4926]: I1122 10:41:10.745297 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-sr572" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fa95257d-7464-4038-b2f3-aa795e4ac425\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b2dd6b2a2fe640f28f973df40cf7a31e3cba962a0d2aec796c6fcfa3892adce1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0a7d49a66e96b6f535cae7cd657acf64164c67b52df055b133183ef25644863d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0a7d49a66e96b6f535cae7cd657acf64164c67b52df055b133183ef25644863d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://53833aaa04a836cea1e6539fd089b91d92bec1e9c1037d036a09fa83a95b45da\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2c
c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://53833aaa04a836cea1e6539fd089b91d92bec1e9c1037d036a09fa83a95b45da\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://80eeb7fb332eda9b9472092de548160297c4abdf76a929904fcf3321e992c230\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://80eeb7fb332eda9b9472092de548160297c4abdf76a929904fcf3321e992c230\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:40:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6293e4f193241ddbe2b4e93ceee670330e9e6cf86889fc9b049a7195da496a28\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6293e4f193241ddbe2b4e93ceee670330e9e6cf86889fc9b049a7195da496a28\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:40:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-re
lease\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6ffd06bbdeee7c23dde48a03a975b5a86b258d8f95281abd78c8ce459155bb3f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6ffd06bbdeee7c23dde48a03a975b5a86b258d8f95281abd78c8ce459155bb3f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:40:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ff5dbca8ebeaee6449c5eadf548e32151cc59875e405a574c1f2e0677b40bb19\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ff5dbca8ebeaee6449c5eadf548e32151cc59875e405a574c1f2e0677b40bb19\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:40:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:00Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-sr572\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:41:10Z is after 2025-08-24T17:21:41Z" Nov 22 10:41:10 crc kubenswrapper[4926]: I1122 10:41:10.752964 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:10 crc kubenswrapper[4926]: I1122 10:41:10.753180 4926 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:10 crc kubenswrapper[4926]: I1122 10:41:10.753323 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:10 crc kubenswrapper[4926]: I1122 10:41:10.753460 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:10 crc kubenswrapper[4926]: I1122 10:41:10.753587 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:10Z","lastTransitionTime":"2025-11-22T10:41:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:41:10 crc kubenswrapper[4926]: I1122 10:41:10.758316 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-jfbf4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c42b6f47-b1a4-4fee-8681-3b5288370323\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:14Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:14Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:14Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cddxh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cddxh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:14Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-jfbf4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:41:10Z is after 2025-08-24T17:21:41Z" Nov 22 10:41:10 crc kubenswrapper[4926]: I1122 10:41:10.774039 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8bc26a84-5939-4708-97f7-36fb5b1d1f27\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bdc617bc3d316106b3e66df8521ae9165a90df081b92c4a6c167895006a667a4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8bdb9d01c359b36d5ea1517197ab1db0746267b371b5aaf6ae9af7157af6f76\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f92968d6a164ebcee515ead5d8704ab04430b65f29cec21c10cecf2b99079737\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://99cddd2a619c5ea76699afd981ea6b0ccab8df382e64f7f9b5399ac9091da32a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e4845e8b99f2def354e4a055d407495a578db4994aef6fb1c70b91760bbf4e2a\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"message\\\":\\\"le observer\\\\nW1122 10:40:01.221686 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1122 10:40:01.221835 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1122 10:40:01.223195 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3742639324/tls.crt::/tmp/serving-cert-3742639324/tls.key\\\\\\\"\\\\nI1122 10:40:01.845350 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1122 10:40:01.848213 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1122 10:40:01.848231 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1122 10:40:01.848251 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1122 10:40:01.848258 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1122 10:40:01.855383 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1122 10:40:01.855412 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1122 10:40:01.855418 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1122 10:40:01.855423 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nI1122 10:40:01.855441 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1122 10:40:01.855456 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1122 10:40:01.855461 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1122 10:40:01.855464 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1122 10:40:01.857125 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T10:39:55Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://59001d5969924d417da69f7b385e8ee904bb5289914c6f4a58156a80f2553abd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:43Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e4fdad471fe7d7332fa8c1fc1e4779b934b140f9c808e0af44f05808c56b3e42\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e4fdad471fe7d7332fa8c1fc1e4779b934b140f9c808e0af44f05808c56b3e42\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:39:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:39:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:39:40Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:41:10Z is after 2025-08-24T17:21:41Z" Nov 22 10:41:10 crc kubenswrapper[4926]: I1122 10:41:10.796412 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1f3bdd4c-e5c5-46f1-8f92-450d892ef2e2\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b68f6c08d1ac65e51054d798dcb1e6affbf5f114ea3848b544d3093168e44150\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://adceb29cfdba4f70773eb38f81b2291080a8a25e6d6ffe7fbbb037118e0ac29f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://29acb771444281287208f7c5aecfe78ed6e70fe2940be31d423c011878bee6c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c37c77ac89f0469ff68b3d5dffc2af7d7b6997b
8975abe244e302baf0f5bbacc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ed65ad0463b5aa72d13756891af20691bff705a94c67a55d45f71af2a88b01e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2f4c96d7281eda14ad3971ee27f9f0f401d12ba8b72e32b65bd9d4935182f980\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2f4c96d7281eda14ad3971ee27f9f0f401d12ba8b72e32b65bd9d4935182f980\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:39:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:39:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ebad6d17a869f5f58a7c43300ceace96025b58ca9621cb6bc23b45d69fe471b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ebad6d17a869f5f58a7c43300ceace96025b58ca9621cb6bc23b45d69fe471b4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:39:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:39:42Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://6075bb148352742b787b1e8df58fe76d06db89303a7c297d07446148532e92cb\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6075bb148352742b787b1e8df58fe76d06db89303a7c297d07446148532e92cb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:39:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:39:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:39:40Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:41:10Z is after 2025-08-24T17:21:41Z" Nov 22 10:41:10 crc kubenswrapper[4926]: I1122 10:41:10.812769 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://20ee112074738980326d231b1680aa74c920ec797dbfe4752ebfdfb99695caeb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0a5e6c7d3f4dd4bbfdc58c50672c5187d32b136fb9e096c95e786193e1c773b9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36
cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:41:10Z is after 2025-08-24T17:21:41Z" Nov 22 10:41:10 crc kubenswrapper[4926]: I1122 10:41:10.842358 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-z69nr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"25bc94bb-a5d1-431c-9847-2f6a02997e25\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://20393151c13ddc03e23c32092c70e09c99527d0dd7554c02fc9d2ef8f6e38ba4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://877601ea3a274df873ed3165ae9f9e407093527a55ca10bb9a1c6092312c1b58\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ecaafcef7f92f85be04341712725da108f6dc00c79d4fb72f683186ccaa7c7d7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f2e1c170bdf63c83df5049fbe46ef4de6e08838e961ed3c16904cb9572fdf52f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4dd9889bb8600e958757f70ba92e2e7742f9f4e8ce9bf9c11dc4a2fd4ba0aaa2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f647a7091a4d0acbf116e2583851b13ec4415004447202e892f2b24b7acf5e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b2e1039227d0efdfdc18a3ec2d9bac78d747c80c
3ae8c0a78b3cef27bae4245f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b2e1039227d0efdfdc18a3ec2d9bac78d747c80c3ae8c0a78b3cef27bae4245f\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-22T10:40:57Z\\\",\\\"message\\\":\\\"k-operator-58b4c7f79c-55gtf openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-cqsd2 openshift-ovn-kubernetes/ovnkube-node-z69nr openshift-dns/node-resolver-wqf9b openshift-kube-scheduler/openshift-kube-scheduler-crc openshift-multus/multus-additional-cni-plugins-sr572]\\\\nI1122 10:40:57.541881 6924 obj_retry.go:418] Waiting for all the *v1.Pod retry setup to complete in iterateRetryResources\\\\nI1122 10:40:57.541909 6924 obj_retry.go:303] Retry object setup: *v1.Pod openshift-multus/multus-additional-cni-plugins-sr572\\\\nI1122 10:40:57.541916 6924 obj_retry.go:365] Adding new object: *v1.Pod openshift-multus/multus-additional-cni-plugins-sr572\\\\nI1122 10:40:57.541923 6924 ovn.go:134] Ensuring zone local for Pod openshift-multus/multus-additional-cni-plugins-sr572 in node crc\\\\nI1122 10:40:57.541929 6924 obj_retry.go:386] Retry successful for *v1.Pod openshift-multus/multus-additional-cni-plugins-sr572 after 0 failed attempt(s)\\\\nI1122 10:40:57.541933 6924 default_network_controller.go:776] Recording success event on pod openshift-multus/multus-additional-cni-plugins-sr572\\\\nI1122 10:40:57.541944 6924 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1122 10:40:57.541989 6924 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:56Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 40s restarting failed container=ovnkube-controller 
pod=ovnkube-node-z69nr_openshift-ovn-kubernetes(25bc94bb-a5d1-431c-9847-2f6a02997e25)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://19bceeb84e2cce918b75507fb0f9d5e4365fcd0d30da4491b83ed54b3c2d369a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fde011e559f073f261a07a3e55c05d88d1b11a16b6858b74cb8ef79b97c266b1\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fde011e559f073f261a07a3e55c05d88d1b11a16b6858b74cb8ef79b97c266b1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:00Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-z69nr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:41:10Z is after 2025-08-24T17:21:41Z" Nov 22 10:41:10 crc kubenswrapper[4926]: I1122 10:41:10.856354 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:10 crc kubenswrapper[4926]: I1122 10:41:10.856384 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:10 crc kubenswrapper[4926]: I1122 10:41:10.856392 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:10 crc kubenswrapper[4926]: I1122 10:41:10.856407 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:10 crc kubenswrapper[4926]: I1122 10:41:10.856415 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:10Z","lastTransitionTime":"2025-11-22T10:41:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:41:10 crc kubenswrapper[4926]: I1122 10:41:10.859350 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-cqsd2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"40323bfa-0937-4581-a5f5-bbfe1de066eb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ed82455ff9e2721a5688857be775b66c293c17713ae1ce362da8eca9a0c52c24\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w5t6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2335c0518813a704a36b683366f4b63c953afc0187416234b6682532298f80ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w5t6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:12Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-cqsd2\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:41:10Z is after 2025-08-24T17:21:41Z" Nov 22 10:41:10 crc kubenswrapper[4926]: I1122 10:41:10.875161 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-c6w2q" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"36de2843-6491-4c54-b624-c4a3d328c164\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://954a6a1daad88733da68073d0f235894eb5931360545e07bc25ca2cbedf0efae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://13dff3bd18b5adc45fb92f8207a6726011752f430271dabbae9f2e94011f1b08\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-22T10:40:48Z\\\",\\\"message\\\":\\\"2025-11-22T10:40:02+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_9f86c420-ab6e-4737-b325-6c2a7026407f\\\\n2025-11-22T10:40:02+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_9f86c420-ab6e-4737-b325-6c2a7026407f to /host/opt/cni/bin/\\\\n2025-11-22T10:40:03Z [verbose] multus-daemon started\\\\n2025-11-22T10:40:03Z [verbose] Readiness Indicator file check\\\\n2025-11-22T10:40:48Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-52vzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:00Z\\\"}}\" for pod \"openshift-multus\"/\"multus-c6w2q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:41:10Z is after 2025-08-24T17:21:41Z" Nov 22 10:41:10 crc kubenswrapper[4926]: I1122 10:41:10.892583 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://136423f9eeb872be0b685dcfee2a81e5f4133ad09607dc53fdfa64d70e87ceab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:41:10Z is after 2025-08-24T17:21:41Z" Nov 22 10:41:10 crc kubenswrapper[4926]: I1122 10:41:10.959049 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:10 crc kubenswrapper[4926]: I1122 10:41:10.959100 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:10 crc kubenswrapper[4926]: I1122 10:41:10.959116 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:10 crc kubenswrapper[4926]: I1122 10:41:10.959138 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:10 crc kubenswrapper[4926]: I1122 10:41:10.959158 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:10Z","lastTransitionTime":"2025-11-22T10:41:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:41:11 crc kubenswrapper[4926]: I1122 10:41:11.062186 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:11 crc kubenswrapper[4926]: I1122 10:41:11.062231 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:11 crc kubenswrapper[4926]: I1122 10:41:11.062268 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:11 crc kubenswrapper[4926]: I1122 10:41:11.062290 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:11 crc kubenswrapper[4926]: I1122 10:41:11.062303 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:11Z","lastTransitionTime":"2025-11-22T10:41:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:41:11 crc kubenswrapper[4926]: I1122 10:41:11.164933 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:11 crc kubenswrapper[4926]: I1122 10:41:11.164973 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:11 crc kubenswrapper[4926]: I1122 10:41:11.164986 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:11 crc kubenswrapper[4926]: I1122 10:41:11.165002 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:11 crc kubenswrapper[4926]: I1122 10:41:11.165014 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:11Z","lastTransitionTime":"2025-11-22T10:41:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:41:11 crc kubenswrapper[4926]: I1122 10:41:11.268306 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:11 crc kubenswrapper[4926]: I1122 10:41:11.268363 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:11 crc kubenswrapper[4926]: I1122 10:41:11.268375 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:11 crc kubenswrapper[4926]: I1122 10:41:11.268393 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:11 crc kubenswrapper[4926]: I1122 10:41:11.268405 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:11Z","lastTransitionTime":"2025-11-22T10:41:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:41:11 crc kubenswrapper[4926]: I1122 10:41:11.372316 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:11 crc kubenswrapper[4926]: I1122 10:41:11.372391 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:11 crc kubenswrapper[4926]: I1122 10:41:11.372411 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:11 crc kubenswrapper[4926]: I1122 10:41:11.372438 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:11 crc kubenswrapper[4926]: I1122 10:41:11.372457 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:11Z","lastTransitionTime":"2025-11-22T10:41:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:41:11 crc kubenswrapper[4926]: I1122 10:41:11.475811 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:11 crc kubenswrapper[4926]: I1122 10:41:11.475857 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:11 crc kubenswrapper[4926]: I1122 10:41:11.475869 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:11 crc kubenswrapper[4926]: I1122 10:41:11.475904 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:11 crc kubenswrapper[4926]: I1122 10:41:11.475920 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:11Z","lastTransitionTime":"2025-11-22T10:41:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:41:11 crc kubenswrapper[4926]: I1122 10:41:11.578682 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:11 crc kubenswrapper[4926]: I1122 10:41:11.578740 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:11 crc kubenswrapper[4926]: I1122 10:41:11.578757 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:11 crc kubenswrapper[4926]: I1122 10:41:11.578782 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:11 crc kubenswrapper[4926]: I1122 10:41:11.578801 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:11Z","lastTransitionTime":"2025-11-22T10:41:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:41:11 crc kubenswrapper[4926]: I1122 10:41:11.581499 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 22 10:41:11 crc kubenswrapper[4926]: E1122 10:41:11.582331 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 22 10:41:11 crc kubenswrapper[4926]: I1122 10:41:11.582671 4926 scope.go:117] "RemoveContainer" containerID="b2e1039227d0efdfdc18a3ec2d9bac78d747c80c3ae8c0a78b3cef27bae4245f" Nov 22 10:41:11 crc kubenswrapper[4926]: E1122 10:41:11.583037 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-z69nr_openshift-ovn-kubernetes(25bc94bb-a5d1-431c-9847-2f6a02997e25)\"" pod="openshift-ovn-kubernetes/ovnkube-node-z69nr" podUID="25bc94bb-a5d1-431c-9847-2f6a02997e25" Nov 22 10:41:11 crc kubenswrapper[4926]: I1122 10:41:11.682379 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:11 crc kubenswrapper[4926]: I1122 10:41:11.682796 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:11 crc kubenswrapper[4926]: I1122 10:41:11.683077 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:11 crc kubenswrapper[4926]: I1122 10:41:11.683255 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:11 crc kubenswrapper[4926]: I1122 10:41:11.683415 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:11Z","lastTransitionTime":"2025-11-22T10:41:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:41:11 crc kubenswrapper[4926]: I1122 10:41:11.786447 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:11 crc kubenswrapper[4926]: I1122 10:41:11.786532 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:11 crc kubenswrapper[4926]: I1122 10:41:11.786555 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:11 crc kubenswrapper[4926]: I1122 10:41:11.786588 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:11 crc kubenswrapper[4926]: I1122 10:41:11.786612 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:11Z","lastTransitionTime":"2025-11-22T10:41:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:41:11 crc kubenswrapper[4926]: I1122 10:41:11.886712 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:11 crc kubenswrapper[4926]: I1122 10:41:11.886851 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:11 crc kubenswrapper[4926]: I1122 10:41:11.886869 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:11 crc kubenswrapper[4926]: I1122 10:41:11.886923 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:11 crc kubenswrapper[4926]: I1122 10:41:11.886945 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:11Z","lastTransitionTime":"2025-11-22T10:41:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:41:11 crc kubenswrapper[4926]: E1122 10:41:11.910385 4926 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404548Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865348Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:41:11Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:41:11Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:41:11Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:41:11Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:41:11Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:41:11Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:41:11Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:41:11Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"aeb560ef-1eb5-4732-98e3-250b723cbd1b\\\",\\\"systemUUID\\\":\\\"16dcb71b-1e1f-4d77-bb74-17aa213c9052\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:41:11Z is after 
2025-08-24T17:21:41Z" Nov 22 10:41:11 crc kubenswrapper[4926]: I1122 10:41:11.916290 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:11 crc kubenswrapper[4926]: I1122 10:41:11.916352 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:11 crc kubenswrapper[4926]: I1122 10:41:11.916371 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:11 crc kubenswrapper[4926]: I1122 10:41:11.916395 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:11 crc kubenswrapper[4926]: I1122 10:41:11.916412 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:11Z","lastTransitionTime":"2025-11-22T10:41:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:41:11 crc kubenswrapper[4926]: E1122 10:41:11.937512 4926 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404548Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865348Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:41:11Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:41:11Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:41:11Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:41:11Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:41:11Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:41:11Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:41:11Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:41:11Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"aeb560ef-1eb5-4732-98e3-250b723cbd1b\\\",\\\"systemUUID\\\":\\\"16dcb71b-1e1f-4d77-bb74-17aa213c9052\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:41:11Z is after 
2025-08-24T17:21:41Z" Nov 22 10:41:11 crc kubenswrapper[4926]: I1122 10:41:11.946524 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:11 crc kubenswrapper[4926]: I1122 10:41:11.947119 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:11 crc kubenswrapper[4926]: I1122 10:41:11.947164 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:11 crc kubenswrapper[4926]: I1122 10:41:11.947195 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:11 crc kubenswrapper[4926]: I1122 10:41:11.947227 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:11Z","lastTransitionTime":"2025-11-22T10:41:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:41:11 crc kubenswrapper[4926]: E1122 10:41:11.969160 4926 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404548Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865348Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:41:11Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:41:11Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:41:11Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:41:11Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:41:11Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:41:11Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:41:11Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:41:11Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"aeb560ef-1eb5-4732-98e3-250b723cbd1b\\\",\\\"systemUUID\\\":\\\"16dcb71b-1e1f-4d77-bb74-17aa213c9052\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:41:11Z is after 
2025-08-24T17:21:41Z" Nov 22 10:41:11 crc kubenswrapper[4926]: I1122 10:41:11.975687 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:11 crc kubenswrapper[4926]: I1122 10:41:11.975966 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:11 crc kubenswrapper[4926]: I1122 10:41:11.976138 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:11 crc kubenswrapper[4926]: I1122 10:41:11.976405 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:11 crc kubenswrapper[4926]: I1122 10:41:11.976568 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:11Z","lastTransitionTime":"2025-11-22T10:41:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:41:11 crc kubenswrapper[4926]: E1122 10:41:11.996948 4926 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404548Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865348Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:41:11Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:41:11Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:41:11Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:41:11Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:41:11Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:41:11Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:41:11Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:41:11Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"aeb560ef-1eb5-4732-98e3-250b723cbd1b\\\",\\\"systemUUID\\\":\\\"16dcb71b-1e1f-4d77-bb74-17aa213c9052\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:41:11Z is after 
2025-08-24T17:21:41Z" Nov 22 10:41:12 crc kubenswrapper[4926]: I1122 10:41:12.002582 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:12 crc kubenswrapper[4926]: I1122 10:41:12.002864 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:12 crc kubenswrapper[4926]: I1122 10:41:12.003094 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:12 crc kubenswrapper[4926]: I1122 10:41:12.003272 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:12 crc kubenswrapper[4926]: I1122 10:41:12.003411 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:12Z","lastTransitionTime":"2025-11-22T10:41:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:41:12 crc kubenswrapper[4926]: E1122 10:41:12.025397 4926 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404548Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865348Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:41:12Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:41:12Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:41:12Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:41:12Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:41:12Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:41:12Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:41:12Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:41:12Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"aeb560ef-1eb5-4732-98e3-250b723cbd1b\\\",\\\"systemUUID\\\":\\\"16dcb71b-1e1f-4d77-bb74-17aa213c9052\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:41:12Z is after 
2025-08-24T17:21:41Z" Nov 22 10:41:12 crc kubenswrapper[4926]: E1122 10:41:12.025578 4926 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 22 10:41:12 crc kubenswrapper[4926]: I1122 10:41:12.028037 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:12 crc kubenswrapper[4926]: I1122 10:41:12.028234 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:12 crc kubenswrapper[4926]: I1122 10:41:12.028345 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:12 crc kubenswrapper[4926]: I1122 10:41:12.028447 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:12 crc kubenswrapper[4926]: I1122 10:41:12.028544 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:12Z","lastTransitionTime":"2025-11-22T10:41:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:41:12 crc kubenswrapper[4926]: I1122 10:41:12.131930 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:12 crc kubenswrapper[4926]: I1122 10:41:12.132229 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:12 crc kubenswrapper[4926]: I1122 10:41:12.132356 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:12 crc kubenswrapper[4926]: I1122 10:41:12.132486 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:12 crc kubenswrapper[4926]: I1122 10:41:12.132578 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:12Z","lastTransitionTime":"2025-11-22T10:41:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:41:12 crc kubenswrapper[4926]: I1122 10:41:12.236298 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:12 crc kubenswrapper[4926]: I1122 10:41:12.236351 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:12 crc kubenswrapper[4926]: I1122 10:41:12.236370 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:12 crc kubenswrapper[4926]: I1122 10:41:12.236394 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:12 crc kubenswrapper[4926]: I1122 10:41:12.236413 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:12Z","lastTransitionTime":"2025-11-22T10:41:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:41:12 crc kubenswrapper[4926]: I1122 10:41:12.340116 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:12 crc kubenswrapper[4926]: I1122 10:41:12.340193 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:12 crc kubenswrapper[4926]: I1122 10:41:12.340216 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:12 crc kubenswrapper[4926]: I1122 10:41:12.340248 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:12 crc kubenswrapper[4926]: I1122 10:41:12.340278 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:12Z","lastTransitionTime":"2025-11-22T10:41:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:41:12 crc kubenswrapper[4926]: I1122 10:41:12.442654 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:12 crc kubenswrapper[4926]: I1122 10:41:12.442700 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:12 crc kubenswrapper[4926]: I1122 10:41:12.442712 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:12 crc kubenswrapper[4926]: I1122 10:41:12.442730 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:12 crc kubenswrapper[4926]: I1122 10:41:12.442745 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:12Z","lastTransitionTime":"2025-11-22T10:41:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:41:12 crc kubenswrapper[4926]: I1122 10:41:12.545928 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:12 crc kubenswrapper[4926]: I1122 10:41:12.545994 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:12 crc kubenswrapper[4926]: I1122 10:41:12.546016 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:12 crc kubenswrapper[4926]: I1122 10:41:12.546044 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:12 crc kubenswrapper[4926]: I1122 10:41:12.546065 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:12Z","lastTransitionTime":"2025-11-22T10:41:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:41:12 crc kubenswrapper[4926]: I1122 10:41:12.581250 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 22 10:41:12 crc kubenswrapper[4926]: I1122 10:41:12.581343 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 10:41:12 crc kubenswrapper[4926]: I1122 10:41:12.581270 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-jfbf4" Nov 22 10:41:12 crc kubenswrapper[4926]: E1122 10:41:12.581483 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 22 10:41:12 crc kubenswrapper[4926]: E1122 10:41:12.581588 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 22 10:41:12 crc kubenswrapper[4926]: E1122 10:41:12.581739 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-jfbf4" podUID="c42b6f47-b1a4-4fee-8681-3b5288370323" Nov 22 10:41:12 crc kubenswrapper[4926]: I1122 10:41:12.649329 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:12 crc kubenswrapper[4926]: I1122 10:41:12.649398 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:12 crc kubenswrapper[4926]: I1122 10:41:12.649428 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:12 crc kubenswrapper[4926]: I1122 10:41:12.649468 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:12 crc kubenswrapper[4926]: I1122 10:41:12.649493 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:12Z","lastTransitionTime":"2025-11-22T10:41:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:41:12 crc kubenswrapper[4926]: I1122 10:41:12.752711 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:12 crc kubenswrapper[4926]: I1122 10:41:12.752780 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:12 crc kubenswrapper[4926]: I1122 10:41:12.752803 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:12 crc kubenswrapper[4926]: I1122 10:41:12.752831 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:12 crc kubenswrapper[4926]: I1122 10:41:12.752852 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:12Z","lastTransitionTime":"2025-11-22T10:41:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:41:12 crc kubenswrapper[4926]: I1122 10:41:12.855994 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:12 crc kubenswrapper[4926]: I1122 10:41:12.856052 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:12 crc kubenswrapper[4926]: I1122 10:41:12.856074 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:12 crc kubenswrapper[4926]: I1122 10:41:12.856102 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:12 crc kubenswrapper[4926]: I1122 10:41:12.856124 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:12Z","lastTransitionTime":"2025-11-22T10:41:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:41:12 crc kubenswrapper[4926]: I1122 10:41:12.959317 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:12 crc kubenswrapper[4926]: I1122 10:41:12.959377 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:12 crc kubenswrapper[4926]: I1122 10:41:12.959396 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:12 crc kubenswrapper[4926]: I1122 10:41:12.959420 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:12 crc kubenswrapper[4926]: I1122 10:41:12.959438 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:12Z","lastTransitionTime":"2025-11-22T10:41:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:41:13 crc kubenswrapper[4926]: I1122 10:41:13.062635 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:13 crc kubenswrapper[4926]: I1122 10:41:13.062702 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:13 crc kubenswrapper[4926]: I1122 10:41:13.062719 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:13 crc kubenswrapper[4926]: I1122 10:41:13.062745 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:13 crc kubenswrapper[4926]: I1122 10:41:13.062762 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:13Z","lastTransitionTime":"2025-11-22T10:41:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:41:13 crc kubenswrapper[4926]: I1122 10:41:13.166696 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:13 crc kubenswrapper[4926]: I1122 10:41:13.166824 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:13 crc kubenswrapper[4926]: I1122 10:41:13.166952 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:13 crc kubenswrapper[4926]: I1122 10:41:13.166991 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:13 crc kubenswrapper[4926]: I1122 10:41:13.167070 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:13Z","lastTransitionTime":"2025-11-22T10:41:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:41:13 crc kubenswrapper[4926]: I1122 10:41:13.270650 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:13 crc kubenswrapper[4926]: I1122 10:41:13.271082 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:13 crc kubenswrapper[4926]: I1122 10:41:13.271347 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:13 crc kubenswrapper[4926]: I1122 10:41:13.271561 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:13 crc kubenswrapper[4926]: I1122 10:41:13.271716 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:13Z","lastTransitionTime":"2025-11-22T10:41:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:41:13 crc kubenswrapper[4926]: I1122 10:41:13.375160 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:13 crc kubenswrapper[4926]: I1122 10:41:13.375543 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:13 crc kubenswrapper[4926]: I1122 10:41:13.375714 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:13 crc kubenswrapper[4926]: I1122 10:41:13.375902 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:13 crc kubenswrapper[4926]: I1122 10:41:13.376151 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:13Z","lastTransitionTime":"2025-11-22T10:41:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:41:13 crc kubenswrapper[4926]: I1122 10:41:13.479168 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:13 crc kubenswrapper[4926]: I1122 10:41:13.479528 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:13 crc kubenswrapper[4926]: I1122 10:41:13.479700 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:13 crc kubenswrapper[4926]: I1122 10:41:13.479872 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:13 crc kubenswrapper[4926]: I1122 10:41:13.480100 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:13Z","lastTransitionTime":"2025-11-22T10:41:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:41:13 crc kubenswrapper[4926]: I1122 10:41:13.580953 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 22 10:41:13 crc kubenswrapper[4926]: E1122 10:41:13.581186 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 22 10:41:13 crc kubenswrapper[4926]: I1122 10:41:13.582725 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:13 crc kubenswrapper[4926]: I1122 10:41:13.582790 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:13 crc kubenswrapper[4926]: I1122 10:41:13.582813 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:13 crc kubenswrapper[4926]: I1122 10:41:13.582839 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:13 crc kubenswrapper[4926]: I1122 10:41:13.582860 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:13Z","lastTransitionTime":"2025-11-22T10:41:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:41:13 crc kubenswrapper[4926]: I1122 10:41:13.686157 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:13 crc kubenswrapper[4926]: I1122 10:41:13.686208 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:13 crc kubenswrapper[4926]: I1122 10:41:13.686220 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:13 crc kubenswrapper[4926]: I1122 10:41:13.686237 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:13 crc kubenswrapper[4926]: I1122 10:41:13.686269 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:13Z","lastTransitionTime":"2025-11-22T10:41:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:41:13 crc kubenswrapper[4926]: I1122 10:41:13.789375 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:13 crc kubenswrapper[4926]: I1122 10:41:13.789446 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:13 crc kubenswrapper[4926]: I1122 10:41:13.789468 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:13 crc kubenswrapper[4926]: I1122 10:41:13.789502 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:13 crc kubenswrapper[4926]: I1122 10:41:13.789526 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:13Z","lastTransitionTime":"2025-11-22T10:41:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:41:13 crc kubenswrapper[4926]: I1122 10:41:13.892768 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:13 crc kubenswrapper[4926]: I1122 10:41:13.892824 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:13 crc kubenswrapper[4926]: I1122 10:41:13.892842 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:13 crc kubenswrapper[4926]: I1122 10:41:13.892864 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:13 crc kubenswrapper[4926]: I1122 10:41:13.892889 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:13Z","lastTransitionTime":"2025-11-22T10:41:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:41:13 crc kubenswrapper[4926]: I1122 10:41:13.995471 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:13 crc kubenswrapper[4926]: I1122 10:41:13.995533 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:13 crc kubenswrapper[4926]: I1122 10:41:13.995544 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:13 crc kubenswrapper[4926]: I1122 10:41:13.995560 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:13 crc kubenswrapper[4926]: I1122 10:41:13.995573 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:13Z","lastTransitionTime":"2025-11-22T10:41:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:41:14 crc kubenswrapper[4926]: I1122 10:41:14.099022 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:14 crc kubenswrapper[4926]: I1122 10:41:14.099078 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:14 crc kubenswrapper[4926]: I1122 10:41:14.099094 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:14 crc kubenswrapper[4926]: I1122 10:41:14.099113 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:14 crc kubenswrapper[4926]: I1122 10:41:14.099127 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:14Z","lastTransitionTime":"2025-11-22T10:41:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:41:14 crc kubenswrapper[4926]: I1122 10:41:14.202156 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:14 crc kubenswrapper[4926]: I1122 10:41:14.202235 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:14 crc kubenswrapper[4926]: I1122 10:41:14.202259 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:14 crc kubenswrapper[4926]: I1122 10:41:14.202292 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:14 crc kubenswrapper[4926]: I1122 10:41:14.202315 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:14Z","lastTransitionTime":"2025-11-22T10:41:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:41:14 crc kubenswrapper[4926]: I1122 10:41:14.304854 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:14 crc kubenswrapper[4926]: I1122 10:41:14.305167 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:14 crc kubenswrapper[4926]: I1122 10:41:14.305258 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:14 crc kubenswrapper[4926]: I1122 10:41:14.305350 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:14 crc kubenswrapper[4926]: I1122 10:41:14.305450 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:14Z","lastTransitionTime":"2025-11-22T10:41:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:41:14 crc kubenswrapper[4926]: I1122 10:41:14.413800 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:14 crc kubenswrapper[4926]: I1122 10:41:14.413875 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:14 crc kubenswrapper[4926]: I1122 10:41:14.414230 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:14 crc kubenswrapper[4926]: I1122 10:41:14.414388 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:14 crc kubenswrapper[4926]: I1122 10:41:14.414418 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:14Z","lastTransitionTime":"2025-11-22T10:41:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:41:14 crc kubenswrapper[4926]: I1122 10:41:14.517198 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:14 crc kubenswrapper[4926]: I1122 10:41:14.517253 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:14 crc kubenswrapper[4926]: I1122 10:41:14.517269 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:14 crc kubenswrapper[4926]: I1122 10:41:14.517294 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:14 crc kubenswrapper[4926]: I1122 10:41:14.517310 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:14Z","lastTransitionTime":"2025-11-22T10:41:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:41:14 crc kubenswrapper[4926]: I1122 10:41:14.582065 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 10:41:14 crc kubenswrapper[4926]: E1122 10:41:14.582573 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 22 10:41:14 crc kubenswrapper[4926]: I1122 10:41:14.582188 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 22 10:41:14 crc kubenswrapper[4926]: I1122 10:41:14.582143 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-jfbf4" Nov 22 10:41:14 crc kubenswrapper[4926]: E1122 10:41:14.583249 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-jfbf4" podUID="c42b6f47-b1a4-4fee-8681-3b5288370323" Nov 22 10:41:14 crc kubenswrapper[4926]: E1122 10:41:14.583586 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 22 10:41:14 crc kubenswrapper[4926]: I1122 10:41:14.620335 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:14 crc kubenswrapper[4926]: I1122 10:41:14.620549 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:14 crc kubenswrapper[4926]: I1122 10:41:14.620827 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:14 crc kubenswrapper[4926]: I1122 10:41:14.621141 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:14 crc kubenswrapper[4926]: I1122 10:41:14.621376 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:14Z","lastTransitionTime":"2025-11-22T10:41:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:41:14 crc kubenswrapper[4926]: I1122 10:41:14.724710 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:14 crc kubenswrapper[4926]: I1122 10:41:14.724798 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:14 crc kubenswrapper[4926]: I1122 10:41:14.724818 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:14 crc kubenswrapper[4926]: I1122 10:41:14.724839 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:14 crc kubenswrapper[4926]: I1122 10:41:14.724856 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:14Z","lastTransitionTime":"2025-11-22T10:41:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:41:14 crc kubenswrapper[4926]: I1122 10:41:14.827591 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:14 crc kubenswrapper[4926]: I1122 10:41:14.827865 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:14 crc kubenswrapper[4926]: I1122 10:41:14.828045 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:14 crc kubenswrapper[4926]: I1122 10:41:14.828186 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:14 crc kubenswrapper[4926]: I1122 10:41:14.828334 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:14Z","lastTransitionTime":"2025-11-22T10:41:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:41:14 crc kubenswrapper[4926]: I1122 10:41:14.932010 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:14 crc kubenswrapper[4926]: I1122 10:41:14.932076 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:14 crc kubenswrapper[4926]: I1122 10:41:14.932099 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:14 crc kubenswrapper[4926]: I1122 10:41:14.932126 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:14 crc kubenswrapper[4926]: I1122 10:41:14.932145 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:14Z","lastTransitionTime":"2025-11-22T10:41:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:41:15 crc kubenswrapper[4926]: I1122 10:41:15.035080 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:15 crc kubenswrapper[4926]: I1122 10:41:15.035464 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:15 crc kubenswrapper[4926]: I1122 10:41:15.035627 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:15 crc kubenswrapper[4926]: I1122 10:41:15.035919 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:15 crc kubenswrapper[4926]: I1122 10:41:15.036121 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:15Z","lastTransitionTime":"2025-11-22T10:41:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:41:15 crc kubenswrapper[4926]: I1122 10:41:15.139128 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:15 crc kubenswrapper[4926]: I1122 10:41:15.139502 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:15 crc kubenswrapper[4926]: I1122 10:41:15.139670 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:15 crc kubenswrapper[4926]: I1122 10:41:15.139864 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:15 crc kubenswrapper[4926]: I1122 10:41:15.140048 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:15Z","lastTransitionTime":"2025-11-22T10:41:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:41:15 crc kubenswrapper[4926]: I1122 10:41:15.243064 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:15 crc kubenswrapper[4926]: I1122 10:41:15.243142 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:15 crc kubenswrapper[4926]: I1122 10:41:15.243164 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:15 crc kubenswrapper[4926]: I1122 10:41:15.243197 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:15 crc kubenswrapper[4926]: I1122 10:41:15.243220 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:15Z","lastTransitionTime":"2025-11-22T10:41:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:41:15 crc kubenswrapper[4926]: I1122 10:41:15.346467 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:15 crc kubenswrapper[4926]: I1122 10:41:15.346533 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:15 crc kubenswrapper[4926]: I1122 10:41:15.346557 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:15 crc kubenswrapper[4926]: I1122 10:41:15.346587 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:15 crc kubenswrapper[4926]: I1122 10:41:15.346611 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:15Z","lastTransitionTime":"2025-11-22T10:41:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:41:15 crc kubenswrapper[4926]: I1122 10:41:15.450261 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:15 crc kubenswrapper[4926]: I1122 10:41:15.450347 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:15 crc kubenswrapper[4926]: I1122 10:41:15.450372 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:15 crc kubenswrapper[4926]: I1122 10:41:15.450404 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:15 crc kubenswrapper[4926]: I1122 10:41:15.450430 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:15Z","lastTransitionTime":"2025-11-22T10:41:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:41:15 crc kubenswrapper[4926]: I1122 10:41:15.554055 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:15 crc kubenswrapper[4926]: I1122 10:41:15.554118 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:15 crc kubenswrapper[4926]: I1122 10:41:15.554135 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:15 crc kubenswrapper[4926]: I1122 10:41:15.554159 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:15 crc kubenswrapper[4926]: I1122 10:41:15.554178 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:15Z","lastTransitionTime":"2025-11-22T10:41:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:41:15 crc kubenswrapper[4926]: I1122 10:41:15.580872 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 22 10:41:15 crc kubenswrapper[4926]: E1122 10:41:15.581148 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 22 10:41:15 crc kubenswrapper[4926]: I1122 10:41:15.657424 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:15 crc kubenswrapper[4926]: I1122 10:41:15.657762 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:15 crc kubenswrapper[4926]: I1122 10:41:15.658023 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:15 crc kubenswrapper[4926]: I1122 10:41:15.658239 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:15 crc kubenswrapper[4926]: I1122 10:41:15.658426 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:15Z","lastTransitionTime":"2025-11-22T10:41:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:41:15 crc kubenswrapper[4926]: I1122 10:41:15.761591 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:15 crc kubenswrapper[4926]: I1122 10:41:15.761657 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:15 crc kubenswrapper[4926]: I1122 10:41:15.761677 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:15 crc kubenswrapper[4926]: I1122 10:41:15.761702 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:15 crc kubenswrapper[4926]: I1122 10:41:15.761718 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:15Z","lastTransitionTime":"2025-11-22T10:41:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:41:15 crc kubenswrapper[4926]: I1122 10:41:15.865107 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:15 crc kubenswrapper[4926]: I1122 10:41:15.865169 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:15 crc kubenswrapper[4926]: I1122 10:41:15.865183 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:15 crc kubenswrapper[4926]: I1122 10:41:15.865205 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:15 crc kubenswrapper[4926]: I1122 10:41:15.865221 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:15Z","lastTransitionTime":"2025-11-22T10:41:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:41:15 crc kubenswrapper[4926]: I1122 10:41:15.968295 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:15 crc kubenswrapper[4926]: I1122 10:41:15.968619 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:15 crc kubenswrapper[4926]: I1122 10:41:15.968816 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:15 crc kubenswrapper[4926]: I1122 10:41:15.969055 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:15 crc kubenswrapper[4926]: I1122 10:41:15.969207 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:15Z","lastTransitionTime":"2025-11-22T10:41:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:41:16 crc kubenswrapper[4926]: I1122 10:41:16.073025 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:16 crc kubenswrapper[4926]: I1122 10:41:16.073079 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:16 crc kubenswrapper[4926]: I1122 10:41:16.073091 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:16 crc kubenswrapper[4926]: I1122 10:41:16.073105 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:16 crc kubenswrapper[4926]: I1122 10:41:16.073115 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:16Z","lastTransitionTime":"2025-11-22T10:41:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:41:16 crc kubenswrapper[4926]: I1122 10:41:16.175699 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:16 crc kubenswrapper[4926]: I1122 10:41:16.176076 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:16 crc kubenswrapper[4926]: I1122 10:41:16.176204 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:16 crc kubenswrapper[4926]: I1122 10:41:16.176356 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:16 crc kubenswrapper[4926]: I1122 10:41:16.176500 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:16Z","lastTransitionTime":"2025-11-22T10:41:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:41:16 crc kubenswrapper[4926]: I1122 10:41:16.279607 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:16 crc kubenswrapper[4926]: I1122 10:41:16.279657 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:16 crc kubenswrapper[4926]: I1122 10:41:16.279674 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:16 crc kubenswrapper[4926]: I1122 10:41:16.279697 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:16 crc kubenswrapper[4926]: I1122 10:41:16.279715 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:16Z","lastTransitionTime":"2025-11-22T10:41:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:41:16 crc kubenswrapper[4926]: I1122 10:41:16.382911 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:16 crc kubenswrapper[4926]: I1122 10:41:16.382974 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:16 crc kubenswrapper[4926]: I1122 10:41:16.382991 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:16 crc kubenswrapper[4926]: I1122 10:41:16.383015 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:16 crc kubenswrapper[4926]: I1122 10:41:16.383032 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:16Z","lastTransitionTime":"2025-11-22T10:41:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:41:16 crc kubenswrapper[4926]: I1122 10:41:16.486324 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:16 crc kubenswrapper[4926]: I1122 10:41:16.486460 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:16 crc kubenswrapper[4926]: I1122 10:41:16.486488 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:16 crc kubenswrapper[4926]: I1122 10:41:16.486518 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:16 crc kubenswrapper[4926]: I1122 10:41:16.486540 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:16Z","lastTransitionTime":"2025-11-22T10:41:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:41:16 crc kubenswrapper[4926]: I1122 10:41:16.581590 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 10:41:16 crc kubenswrapper[4926]: I1122 10:41:16.581723 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-jfbf4" Nov 22 10:41:16 crc kubenswrapper[4926]: E1122 10:41:16.581822 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 22 10:41:16 crc kubenswrapper[4926]: E1122 10:41:16.581942 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-jfbf4" podUID="c42b6f47-b1a4-4fee-8681-3b5288370323" Nov 22 10:41:16 crc kubenswrapper[4926]: I1122 10:41:16.582235 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 22 10:41:16 crc kubenswrapper[4926]: E1122 10:41:16.582515 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 22 10:41:16 crc kubenswrapper[4926]: I1122 10:41:16.589216 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:16 crc kubenswrapper[4926]: I1122 10:41:16.589270 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:16 crc kubenswrapper[4926]: I1122 10:41:16.589287 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:16 crc kubenswrapper[4926]: I1122 10:41:16.589308 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:16 crc kubenswrapper[4926]: I1122 10:41:16.589327 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:16Z","lastTransitionTime":"2025-11-22T10:41:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:41:16 crc kubenswrapper[4926]: I1122 10:41:16.692426 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:16 crc kubenswrapper[4926]: I1122 10:41:16.692481 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:16 crc kubenswrapper[4926]: I1122 10:41:16.692499 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:16 crc kubenswrapper[4926]: I1122 10:41:16.692525 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:16 crc kubenswrapper[4926]: I1122 10:41:16.692543 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:16Z","lastTransitionTime":"2025-11-22T10:41:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:41:16 crc kubenswrapper[4926]: I1122 10:41:16.796698 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:16 crc kubenswrapper[4926]: I1122 10:41:16.796753 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:16 crc kubenswrapper[4926]: I1122 10:41:16.796770 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:16 crc kubenswrapper[4926]: I1122 10:41:16.796794 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:16 crc kubenswrapper[4926]: I1122 10:41:16.796811 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:16Z","lastTransitionTime":"2025-11-22T10:41:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:41:16 crc kubenswrapper[4926]: I1122 10:41:16.901065 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:16 crc kubenswrapper[4926]: I1122 10:41:16.901139 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:16 crc kubenswrapper[4926]: I1122 10:41:16.901164 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:16 crc kubenswrapper[4926]: I1122 10:41:16.901193 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:16 crc kubenswrapper[4926]: I1122 10:41:16.901215 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:16Z","lastTransitionTime":"2025-11-22T10:41:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:41:17 crc kubenswrapper[4926]: I1122 10:41:17.004409 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:17 crc kubenswrapper[4926]: I1122 10:41:17.004462 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:17 crc kubenswrapper[4926]: I1122 10:41:17.004481 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:17 crc kubenswrapper[4926]: I1122 10:41:17.004507 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:17 crc kubenswrapper[4926]: I1122 10:41:17.004529 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:17Z","lastTransitionTime":"2025-11-22T10:41:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:41:17 crc kubenswrapper[4926]: I1122 10:41:17.107277 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:17 crc kubenswrapper[4926]: I1122 10:41:17.107342 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:17 crc kubenswrapper[4926]: I1122 10:41:17.107361 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:17 crc kubenswrapper[4926]: I1122 10:41:17.107386 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:17 crc kubenswrapper[4926]: I1122 10:41:17.107405 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:17Z","lastTransitionTime":"2025-11-22T10:41:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:41:17 crc kubenswrapper[4926]: I1122 10:41:17.210692 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:17 crc kubenswrapper[4926]: I1122 10:41:17.211066 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:17 crc kubenswrapper[4926]: I1122 10:41:17.211250 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:17 crc kubenswrapper[4926]: I1122 10:41:17.211402 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:17 crc kubenswrapper[4926]: I1122 10:41:17.211532 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:17Z","lastTransitionTime":"2025-11-22T10:41:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:41:17 crc kubenswrapper[4926]: I1122 10:41:17.314382 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:17 crc kubenswrapper[4926]: I1122 10:41:17.314473 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:17 crc kubenswrapper[4926]: I1122 10:41:17.314509 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:17 crc kubenswrapper[4926]: I1122 10:41:17.314538 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:17 crc kubenswrapper[4926]: I1122 10:41:17.314559 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:17Z","lastTransitionTime":"2025-11-22T10:41:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:41:17 crc kubenswrapper[4926]: I1122 10:41:17.418402 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:17 crc kubenswrapper[4926]: I1122 10:41:17.418475 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:17 crc kubenswrapper[4926]: I1122 10:41:17.418496 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:17 crc kubenswrapper[4926]: I1122 10:41:17.418524 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:17 crc kubenswrapper[4926]: I1122 10:41:17.418545 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:17Z","lastTransitionTime":"2025-11-22T10:41:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:41:17 crc kubenswrapper[4926]: I1122 10:41:17.521912 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:17 crc kubenswrapper[4926]: I1122 10:41:17.522266 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:17 crc kubenswrapper[4926]: I1122 10:41:17.522462 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:17 crc kubenswrapper[4926]: I1122 10:41:17.522697 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:17 crc kubenswrapper[4926]: I1122 10:41:17.522895 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:17Z","lastTransitionTime":"2025-11-22T10:41:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:41:17 crc kubenswrapper[4926]: I1122 10:41:17.581808 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 22 10:41:17 crc kubenswrapper[4926]: E1122 10:41:17.582337 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 22 10:41:17 crc kubenswrapper[4926]: I1122 10:41:17.625402 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:17 crc kubenswrapper[4926]: I1122 10:41:17.625456 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:17 crc kubenswrapper[4926]: I1122 10:41:17.625470 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:17 crc kubenswrapper[4926]: I1122 10:41:17.625487 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:17 crc kubenswrapper[4926]: I1122 10:41:17.625499 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:17Z","lastTransitionTime":"2025-11-22T10:41:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:41:17 crc kubenswrapper[4926]: I1122 10:41:17.728002 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:17 crc kubenswrapper[4926]: I1122 10:41:17.728407 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:17 crc kubenswrapper[4926]: I1122 10:41:17.728712 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:17 crc kubenswrapper[4926]: I1122 10:41:17.729047 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:17 crc kubenswrapper[4926]: I1122 10:41:17.729177 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:17Z","lastTransitionTime":"2025-11-22T10:41:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:41:17 crc kubenswrapper[4926]: I1122 10:41:17.831707 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:17 crc kubenswrapper[4926]: I1122 10:41:17.832745 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:17 crc kubenswrapper[4926]: I1122 10:41:17.833108 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:17 crc kubenswrapper[4926]: I1122 10:41:17.833416 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:17 crc kubenswrapper[4926]: I1122 10:41:17.833711 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:17Z","lastTransitionTime":"2025-11-22T10:41:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:41:17 crc kubenswrapper[4926]: I1122 10:41:17.937422 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:17 crc kubenswrapper[4926]: I1122 10:41:17.937463 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:17 crc kubenswrapper[4926]: I1122 10:41:17.937474 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:17 crc kubenswrapper[4926]: I1122 10:41:17.937487 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:17 crc kubenswrapper[4926]: I1122 10:41:17.937691 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:17Z","lastTransitionTime":"2025-11-22T10:41:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:41:18 crc kubenswrapper[4926]: I1122 10:41:18.040147 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:18 crc kubenswrapper[4926]: I1122 10:41:18.040173 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:18 crc kubenswrapper[4926]: I1122 10:41:18.040181 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:18 crc kubenswrapper[4926]: I1122 10:41:18.040192 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:18 crc kubenswrapper[4926]: I1122 10:41:18.040202 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:18Z","lastTransitionTime":"2025-11-22T10:41:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:41:18 crc kubenswrapper[4926]: I1122 10:41:18.143181 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:18 crc kubenswrapper[4926]: I1122 10:41:18.143239 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:18 crc kubenswrapper[4926]: I1122 10:41:18.143256 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:18 crc kubenswrapper[4926]: I1122 10:41:18.143275 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:18 crc kubenswrapper[4926]: I1122 10:41:18.143292 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:18Z","lastTransitionTime":"2025-11-22T10:41:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:41:18 crc kubenswrapper[4926]: I1122 10:41:18.245493 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:18 crc kubenswrapper[4926]: I1122 10:41:18.245550 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:18 crc kubenswrapper[4926]: I1122 10:41:18.245568 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:18 crc kubenswrapper[4926]: I1122 10:41:18.245594 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:18 crc kubenswrapper[4926]: I1122 10:41:18.245613 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:18Z","lastTransitionTime":"2025-11-22T10:41:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:41:18 crc kubenswrapper[4926]: I1122 10:41:18.345595 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/c42b6f47-b1a4-4fee-8681-3b5288370323-metrics-certs\") pod \"network-metrics-daemon-jfbf4\" (UID: \"c42b6f47-b1a4-4fee-8681-3b5288370323\") " pod="openshift-multus/network-metrics-daemon-jfbf4" Nov 22 10:41:18 crc kubenswrapper[4926]: E1122 10:41:18.345838 4926 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 22 10:41:18 crc kubenswrapper[4926]: E1122 10:41:18.345995 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/c42b6f47-b1a4-4fee-8681-3b5288370323-metrics-certs podName:c42b6f47-b1a4-4fee-8681-3b5288370323 nodeName:}" failed. No retries permitted until 2025-11-22 10:42:22.345961816 +0000 UTC m=+162.647567143 (durationBeforeRetry 1m4s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/c42b6f47-b1a4-4fee-8681-3b5288370323-metrics-certs") pod "network-metrics-daemon-jfbf4" (UID: "c42b6f47-b1a4-4fee-8681-3b5288370323") : object "openshift-multus"/"metrics-daemon-secret" not registered Nov 22 10:41:18 crc kubenswrapper[4926]: I1122 10:41:18.347640 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:18 crc kubenswrapper[4926]: I1122 10:41:18.347688 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:18 crc kubenswrapper[4926]: I1122 10:41:18.347704 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:18 crc kubenswrapper[4926]: I1122 10:41:18.347725 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:18 crc kubenswrapper[4926]: I1122 10:41:18.347740 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:18Z","lastTransitionTime":"2025-11-22T10:41:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:41:18 crc kubenswrapper[4926]: I1122 10:41:18.451067 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:18 crc kubenswrapper[4926]: I1122 10:41:18.451125 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:18 crc kubenswrapper[4926]: I1122 10:41:18.451147 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:18 crc kubenswrapper[4926]: I1122 10:41:18.451179 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:18 crc kubenswrapper[4926]: I1122 10:41:18.451203 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:18Z","lastTransitionTime":"2025-11-22T10:41:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:41:18 crc kubenswrapper[4926]: I1122 10:41:18.554174 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:18 crc kubenswrapper[4926]: I1122 10:41:18.554250 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:18 crc kubenswrapper[4926]: I1122 10:41:18.554279 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:18 crc kubenswrapper[4926]: I1122 10:41:18.554309 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:18 crc kubenswrapper[4926]: I1122 10:41:18.554357 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:18Z","lastTransitionTime":"2025-11-22T10:41:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:41:18 crc kubenswrapper[4926]: I1122 10:41:18.580989 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 22 10:41:18 crc kubenswrapper[4926]: I1122 10:41:18.581041 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 10:41:18 crc kubenswrapper[4926]: I1122 10:41:18.581057 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-jfbf4" Nov 22 10:41:18 crc kubenswrapper[4926]: E1122 10:41:18.581203 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 22 10:41:18 crc kubenswrapper[4926]: E1122 10:41:18.581256 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-jfbf4" podUID="c42b6f47-b1a4-4fee-8681-3b5288370323" Nov 22 10:41:18 crc kubenswrapper[4926]: E1122 10:41:18.581415 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 22 10:41:18 crc kubenswrapper[4926]: I1122 10:41:18.657782 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:18 crc kubenswrapper[4926]: I1122 10:41:18.658300 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:18 crc kubenswrapper[4926]: I1122 10:41:18.658469 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:18 crc kubenswrapper[4926]: I1122 10:41:18.658669 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:18 crc kubenswrapper[4926]: I1122 10:41:18.658849 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:18Z","lastTransitionTime":"2025-11-22T10:41:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:41:18 crc kubenswrapper[4926]: I1122 10:41:18.762702 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:18 crc kubenswrapper[4926]: I1122 10:41:18.763191 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:18 crc kubenswrapper[4926]: I1122 10:41:18.763419 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:18 crc kubenswrapper[4926]: I1122 10:41:18.763621 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:18 crc kubenswrapper[4926]: I1122 10:41:18.764046 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:18Z","lastTransitionTime":"2025-11-22T10:41:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Nov 22 10:41:18 crc kubenswrapper[4926]: I1122 10:41:18.869173 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 10:41:18 crc kubenswrapper[4926]: I1122 10:41:18.869240 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 10:41:18 crc kubenswrapper[4926]: I1122 10:41:18.869263 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 10:41:18 crc kubenswrapper[4926]: I1122 10:41:18.869297 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 10:41:18 crc kubenswrapper[4926]: I1122 10:41:18.869322 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:18Z","lastTransitionTime":"2025-11-22T10:41:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 10:41:18 crc kubenswrapper[4926]: I1122 10:41:18.973070 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 10:41:18 crc kubenswrapper[4926]: I1122 10:41:18.973240 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 10:41:18 crc kubenswrapper[4926]: I1122 10:41:18.973268 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 10:41:18 crc kubenswrapper[4926]: I1122 10:41:18.973341 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 10:41:18 crc kubenswrapper[4926]: I1122 10:41:18.973367 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:18Z","lastTransitionTime":"2025-11-22T10:41:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 10:41:19 crc kubenswrapper[4926]: I1122 10:41:19.079663 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 10:41:19 crc kubenswrapper[4926]: I1122 10:41:19.079721 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 10:41:19 crc kubenswrapper[4926]: I1122 10:41:19.079738 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 10:41:19 crc kubenswrapper[4926]: I1122 10:41:19.079762 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 10:41:19 crc kubenswrapper[4926]: I1122 10:41:19.079780 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:19Z","lastTransitionTime":"2025-11-22T10:41:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 10:41:19 crc kubenswrapper[4926]: I1122 10:41:19.182688 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 10:41:19 crc kubenswrapper[4926]: I1122 10:41:19.182756 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 10:41:19 crc kubenswrapper[4926]: I1122 10:41:19.182771 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 10:41:19 crc kubenswrapper[4926]: I1122 10:41:19.182789 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 10:41:19 crc kubenswrapper[4926]: I1122 10:41:19.182802 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:19Z","lastTransitionTime":"2025-11-22T10:41:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 10:41:19 crc kubenswrapper[4926]: I1122 10:41:19.285782 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 10:41:19 crc kubenswrapper[4926]: I1122 10:41:19.286280 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 10:41:19 crc kubenswrapper[4926]: I1122 10:41:19.286497 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 10:41:19 crc kubenswrapper[4926]: I1122 10:41:19.286745 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 10:41:19 crc kubenswrapper[4926]: I1122 10:41:19.287051 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:19Z","lastTransitionTime":"2025-11-22T10:41:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 10:41:19 crc kubenswrapper[4926]: I1122 10:41:19.391100 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 10:41:19 crc kubenswrapper[4926]: I1122 10:41:19.391848 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 10:41:19 crc kubenswrapper[4926]: I1122 10:41:19.392016 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 10:41:19 crc kubenswrapper[4926]: I1122 10:41:19.392176 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 10:41:19 crc kubenswrapper[4926]: I1122 10:41:19.392309 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:19Z","lastTransitionTime":"2025-11-22T10:41:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 10:41:19 crc kubenswrapper[4926]: I1122 10:41:19.495483 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 10:41:19 crc kubenswrapper[4926]: I1122 10:41:19.495550 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 10:41:19 crc kubenswrapper[4926]: I1122 10:41:19.495571 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 10:41:19 crc kubenswrapper[4926]: I1122 10:41:19.495599 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 10:41:19 crc kubenswrapper[4926]: I1122 10:41:19.495619 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:19Z","lastTransitionTime":"2025-11-22T10:41:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 10:41:19 crc kubenswrapper[4926]: I1122 10:41:19.581774 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 22 10:41:19 crc kubenswrapper[4926]: E1122 10:41:19.582360 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 22 10:41:19 crc kubenswrapper[4926]: I1122 10:41:19.598481 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 10:41:19 crc kubenswrapper[4926]: I1122 10:41:19.598690 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 10:41:19 crc kubenswrapper[4926]: I1122 10:41:19.598828 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 10:41:19 crc kubenswrapper[4926]: I1122 10:41:19.599046 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 10:41:19 crc kubenswrapper[4926]: I1122 10:41:19.599197 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:19Z","lastTransitionTime":"2025-11-22T10:41:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 10:41:19 crc kubenswrapper[4926]: I1122 10:41:19.702268 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 10:41:19 crc kubenswrapper[4926]: I1122 10:41:19.702314 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 10:41:19 crc kubenswrapper[4926]: I1122 10:41:19.702329 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 10:41:19 crc kubenswrapper[4926]: I1122 10:41:19.702352 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 10:41:19 crc kubenswrapper[4926]: I1122 10:41:19.702369 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:19Z","lastTransitionTime":"2025-11-22T10:41:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 10:41:19 crc kubenswrapper[4926]: I1122 10:41:19.805531 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 10:41:19 crc kubenswrapper[4926]: I1122 10:41:19.805608 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 10:41:19 crc kubenswrapper[4926]: I1122 10:41:19.805634 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 10:41:19 crc kubenswrapper[4926]: I1122 10:41:19.805663 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 10:41:19 crc kubenswrapper[4926]: I1122 10:41:19.805687 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:19Z","lastTransitionTime":"2025-11-22T10:41:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 10:41:19 crc kubenswrapper[4926]: I1122 10:41:19.909178 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 10:41:19 crc kubenswrapper[4926]: I1122 10:41:19.910111 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 10:41:19 crc kubenswrapper[4926]: I1122 10:41:19.910165 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 10:41:19 crc kubenswrapper[4926]: I1122 10:41:19.910194 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 10:41:19 crc kubenswrapper[4926]: I1122 10:41:19.910259 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:19Z","lastTransitionTime":"2025-11-22T10:41:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 10:41:20 crc kubenswrapper[4926]: I1122 10:41:20.013756 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 10:41:20 crc kubenswrapper[4926]: I1122 10:41:20.013833 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 10:41:20 crc kubenswrapper[4926]: I1122 10:41:20.013855 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 10:41:20 crc kubenswrapper[4926]: I1122 10:41:20.013882 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 10:41:20 crc kubenswrapper[4926]: I1122 10:41:20.013937 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:20Z","lastTransitionTime":"2025-11-22T10:41:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 10:41:20 crc kubenswrapper[4926]: I1122 10:41:20.117515 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 10:41:20 crc kubenswrapper[4926]: I1122 10:41:20.117585 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 10:41:20 crc kubenswrapper[4926]: I1122 10:41:20.117605 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 10:41:20 crc kubenswrapper[4926]: I1122 10:41:20.117628 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 10:41:20 crc kubenswrapper[4926]: I1122 10:41:20.117649 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:20Z","lastTransitionTime":"2025-11-22T10:41:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 10:41:20 crc kubenswrapper[4926]: I1122 10:41:20.220465 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 10:41:20 crc kubenswrapper[4926]: I1122 10:41:20.220536 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 10:41:20 crc kubenswrapper[4926]: I1122 10:41:20.220559 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 10:41:20 crc kubenswrapper[4926]: I1122 10:41:20.220587 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 10:41:20 crc kubenswrapper[4926]: I1122 10:41:20.220605 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:20Z","lastTransitionTime":"2025-11-22T10:41:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 10:41:20 crc kubenswrapper[4926]: I1122 10:41:20.323223 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 10:41:20 crc kubenswrapper[4926]: I1122 10:41:20.323288 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 10:41:20 crc kubenswrapper[4926]: I1122 10:41:20.323311 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 10:41:20 crc kubenswrapper[4926]: I1122 10:41:20.323341 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 10:41:20 crc kubenswrapper[4926]: I1122 10:41:20.323365 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:20Z","lastTransitionTime":"2025-11-22T10:41:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 10:41:20 crc kubenswrapper[4926]: I1122 10:41:20.425337 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 10:41:20 crc kubenswrapper[4926]: I1122 10:41:20.425407 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 10:41:20 crc kubenswrapper[4926]: I1122 10:41:20.425426 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 10:41:20 crc kubenswrapper[4926]: I1122 10:41:20.425452 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 10:41:20 crc kubenswrapper[4926]: I1122 10:41:20.425471 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:20Z","lastTransitionTime":"2025-11-22T10:41:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 10:41:20 crc kubenswrapper[4926]: I1122 10:41:20.528128 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 10:41:20 crc kubenswrapper[4926]: I1122 10:41:20.528175 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 10:41:20 crc kubenswrapper[4926]: I1122 10:41:20.528187 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 10:41:20 crc kubenswrapper[4926]: I1122 10:41:20.528206 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 10:41:20 crc kubenswrapper[4926]: I1122 10:41:20.528223 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:20Z","lastTransitionTime":"2025-11-22T10:41:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 10:41:20 crc kubenswrapper[4926]: I1122 10:41:20.581118 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-jfbf4"
Nov 22 10:41:20 crc kubenswrapper[4926]: I1122 10:41:20.581127 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 22 10:41:20 crc kubenswrapper[4926]: E1122 10:41:20.581354 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-jfbf4" podUID="c42b6f47-b1a4-4fee-8681-3b5288370323"
Nov 22 10:41:20 crc kubenswrapper[4926]: I1122 10:41:20.581382 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 22 10:41:20 crc kubenswrapper[4926]: E1122 10:41:20.581485 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 22 10:41:20 crc kubenswrapper[4926]: E1122 10:41:20.581602 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 22 10:41:20 crc kubenswrapper[4926]: I1122 10:41:20.603040 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:41:20Z is after 2025-08-24T17:21:41Z" Nov 22 10:41:20 crc kubenswrapper[4926]: I1122 10:41:20.616553 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d4977b14-85c3-4141-9b15-1768f09e8d27\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7f4461b3d09c647d71476b2c089ab47e93fe0e0efb27f8179e476ff3667447f6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5nwn7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fb54559825bae40317f4dbc23c5b4238c4681f9845053656d59b2d2b3e504e1a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae3
4a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5nwn7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:00Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-xr9nd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:41:20Z is after 2025-08-24T17:21:41Z" Nov 22 10:41:20 crc kubenswrapper[4926]: I1122 10:41:20.630485 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:20 crc kubenswrapper[4926]: I1122 10:41:20.630573 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:20 crc kubenswrapper[4926]: I1122 10:41:20.630600 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:20 crc kubenswrapper[4926]: I1122 10:41:20.630630 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:20 crc kubenswrapper[4926]: I1122 10:41:20.630654 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:20Z","lastTransitionTime":"2025-11-22T10:41:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:41:20 crc kubenswrapper[4926]: I1122 10:41:20.637561 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-sr572" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fa95257d-7464-4038-b2f3-aa795e4ac425\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b2dd6b2a2fe640f28f973df40cf7a31e3cba962a0d2aec796c6fcfa3892adce1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0a7d49a66e96b6f535cae7cd657acf64164c67b52df055b133183ef25644863d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0a7d49a66e96b6f535cae7cd657acf64164c67b52df055b133183ef25644863d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://53833aaa04a836cea1e6539fd089b91d92bec1e9c1037d036a09fa83a95b45da\
\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://53833aaa04a836cea1e6539fd089b91d92bec1e9c1037d036a09fa83a95b45da\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://80eeb7fb332eda9b9472092de548160297c4abdf76a929904fcf3321e992c230\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://80eeb7fb332eda9b9472092de548160297c4abdf76a929904fcf3321e992c230\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:40:04Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6293e4f193241ddbe2b4e93ceee670330e9e6cf86889fc9b049a7195da496a28\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6293e4f193241ddbe2b4e93ceee670330e9e6cf86889fc9b049a7195da496a28\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:40:05Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"
mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6ffd06bbdeee7c23dde48a03a975b5a86b258d8f95281abd78c8ce459155bb3f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6ffd06bbdeee7c23dde48a03a975b5a86b258d8f95281abd78c8ce459155bb3f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:40:06Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ff5dbca8ebeaee6449c5eadf548e32151cc59875e405a574c1f2e0677b40bb19\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ff5dbca8ebeaee6449c5eadf548e32151cc59875e405a574c1f2e0677b40bb19\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:40:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hjkwg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:00Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-sr572\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:41:20Z is after 2025-08-24T17:21:41Z" Nov 22 10:41:20 crc kubenswrapper[4926]: I1122 10:41:20.649354 4926 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-multus/network-metrics-daemon-jfbf4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c42b6f47-b1a4-4fee-8681-3b5288370323\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:14Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:14Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:14Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cddxh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cddxh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:14Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-jfbf4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:41:20Z is after 2025-08-24T17:21:41Z" Nov 22 10:41:20 crc kubenswrapper[4926]: I1122 10:41:20.668672 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8bc26a84-5939-4708-97f7-36fb5b1d1f27\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bdc617bc3d316106b3e66df8521ae9165a90df081b92c4a6c167895006a667a4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8bdb9d01c359b36d5ea1517197ab1db0746267b371b5aaf6ae9af7157af6f76\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f92968d6a164ebcee515ead5d8704ab04430b65f29cec21c10cecf2b99079737\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://99cddd2a619c5ea76699afd981ea6b0ccab8df382e64f7f9b5399ac9091da32a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e4845e8b99f2def354e4a055d407495a578db4994aef6fb1c70b91760bbf4e2a\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"message\\\":\\\"le observer\\\\nW1122 10:40:01.221686 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1122 10:40:01.221835 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1122 10:40:01.223195 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3742639324/tls.crt::/tmp/serving-cert-3742639324/tls.key\\\\\\\"\\\\nI1122 10:40:01.845350 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1122 10:40:01.848213 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1122 10:40:01.848231 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1122 10:40:01.848251 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1122 10:40:01.848258 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1122 10:40:01.855383 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1122 10:40:01.855412 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1122 10:40:01.855418 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1122 10:40:01.855423 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nI1122 10:40:01.855441 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1122 10:40:01.855456 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1122 10:40:01.855461 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1122 10:40:01.855464 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1122 10:40:01.857125 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T10:39:55Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://59001d5969924d417da69f7b385e8ee904bb5289914c6f4a58156a80f2553abd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:43Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e4fdad471fe7d7332fa8c1fc1e4779b934b140f9c808e0af44f05808c56b3e42\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e4fdad471fe7d7332fa8c1fc1e4779b934b140f9c808e0af44f05808c56b3e42\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:39:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:39:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:39:40Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:41:20Z is after 2025-08-24T17:21:41Z" Nov 22 10:41:20 crc kubenswrapper[4926]: I1122 10:41:20.686666 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"000da1af-72cf-48f8-a5f6-ba658a30f2f2\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cb848b9bd67cb4ffab830dcdca81d4ddcc4fdbdda65202dbfd4632f8fc0bdfc5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a34606ee4262c2a39b7e799d969d8a76024895265fba033ffdbbd036c9e80fe3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0b5507d01d4acb2f96c9c8b4978d67033fe6e9f01a035089649879219c4e9902\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6ec673ef3eadee493ad9112c2b920087ff03986cb55933071e6be31f987ff8b1\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6ec673ef3eadee493ad9112c2b920087ff03986cb55933071e6be31f987ff8b1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:39:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:39:41Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:39:40Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:41:20Z is after 2025-08-24T17:21:41Z" Nov 22 10:41:20 crc kubenswrapper[4926]: I1122 10:41:20.706329 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://20ee112074738980326d231b1680aa74c920ec797dbfe4752ebfdfb99695caeb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0a5e6c7d3f4dd4bbfdc58c50672c5187d32b136fb9e096c95e786193e1c773b9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":t
rue,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:41:20Z is after 2025-08-24T17:21:41Z" Nov 22 10:41:20 crc kubenswrapper[4926]: I1122 10:41:20.728454 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-z69nr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"25bc94bb-a5d1-431c-9847-2f6a02997e25\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://20393151c13ddc03e23c32092c70e09c99527d0dd7554c02fc9d2ef8f6e38ba4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://877601ea3a274df873ed3165ae9f9e407093527a55ca10bb9a1c6092312c1b58\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ecaafcef7f92f85be04341712725da108f6dc00c79d4fb72f683186ccaa7c7d7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f2e1c170bdf63c83df5049fbe46ef4de6e08838e961ed3c16904cb9572fdf52f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4dd9889bb8600e958757f70ba92e2e7742f9f4e8ce9bf9c11dc4a2fd4ba0aaa2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f647a7091a4d0acbf116e2583851b13ec4415004447202e892f2b24b7acf5e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b2e1039227d0efdfdc18a3ec2d9bac78d747c80c
3ae8c0a78b3cef27bae4245f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b2e1039227d0efdfdc18a3ec2d9bac78d747c80c3ae8c0a78b3cef27bae4245f\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-22T10:40:57Z\\\",\\\"message\\\":\\\"k-operator-58b4c7f79c-55gtf openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-cqsd2 openshift-ovn-kubernetes/ovnkube-node-z69nr openshift-dns/node-resolver-wqf9b openshift-kube-scheduler/openshift-kube-scheduler-crc openshift-multus/multus-additional-cni-plugins-sr572]\\\\nI1122 10:40:57.541881 6924 obj_retry.go:418] Waiting for all the *v1.Pod retry setup to complete in iterateRetryResources\\\\nI1122 10:40:57.541909 6924 obj_retry.go:303] Retry object setup: *v1.Pod openshift-multus/multus-additional-cni-plugins-sr572\\\\nI1122 10:40:57.541916 6924 obj_retry.go:365] Adding new object: *v1.Pod openshift-multus/multus-additional-cni-plugins-sr572\\\\nI1122 10:40:57.541923 6924 ovn.go:134] Ensuring zone local for Pod openshift-multus/multus-additional-cni-plugins-sr572 in node crc\\\\nI1122 10:40:57.541929 6924 obj_retry.go:386] Retry successful for *v1.Pod openshift-multus/multus-additional-cni-plugins-sr572 after 0 failed attempt(s)\\\\nI1122 10:40:57.541933 6924 default_network_controller.go:776] Recording success event on pod openshift-multus/multus-additional-cni-plugins-sr572\\\\nI1122 10:40:57.541944 6924 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1122 10:40:57.541989 6924 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:56Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 40s restarting failed container=ovnkube-controller 
pod=ovnkube-node-z69nr_openshift-ovn-kubernetes(25bc94bb-a5d1-431c-9847-2f6a02997e25)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://19bceeb84e2cce918b75507fb0f9d5e4365fcd0d30da4491b83ed54b3c2d369a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:04Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fde011e559f073f261a07a3e55c05d88d1b11a16b6858b74cb8ef79b97c266b1\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fde011e559f073f261a07a3e55c05d88d1b11a16b6858b74cb8ef79b97c266b1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-x999w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:00Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-z69nr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:41:20Z is after 2025-08-24T17:21:41Z" Nov 22 10:41:20 crc kubenswrapper[4926]: I1122 10:41:20.732502 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:20 crc kubenswrapper[4926]: I1122 10:41:20.732529 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:20 crc kubenswrapper[4926]: I1122 10:41:20.732540 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:20 crc kubenswrapper[4926]: I1122 10:41:20.732559 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:20 crc kubenswrapper[4926]: I1122 10:41:20.732571 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:20Z","lastTransitionTime":"2025-11-22T10:41:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:41:20 crc kubenswrapper[4926]: I1122 10:41:20.741979 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-cqsd2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"40323bfa-0937-4581-a5f5-bbfe1de066eb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ed82455ff9e2721a5688857be775b66c293c17713ae1ce362da8eca9a0c52c24\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w5t6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2335c0518813a704a36b683366f4b63c953afc0187416234b6682532298f80ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-w5t6w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:12Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-cqsd2\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:41:20Z is after 2025-08-24T17:21:41Z" Nov 22 10:41:20 crc kubenswrapper[4926]: I1122 10:41:20.754353 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"69f999b4-0c05-453c-a516-ff0c2afdddf1\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6bf989ac1f115799fd40c7d673163ebf9361ac54f90ae23d92bbfb4d07d14bb0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f4752de8b3ce1c860578c5b60a69bdb23fcbf6d14241c87c1b719c047d81af9c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f4752de8b3ce1c860578c5b60a69bdb23fcbf6d14241c87c1b719c047d81af9c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:39:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:39:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:39:40Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:41:20Z is after 2025-08-24T17:21:41Z" Nov 22 10:41:20 crc kubenswrapper[4926]: I1122 10:41:20.775236 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1f3bdd4c-e5c5-46f1-8f92-450d892ef2e2\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b68f6c08d1ac65e51054d798dcb1e6affbf5f114ea3848b544d3093168e44150\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://adceb29cfdba4f70773eb38f81b2291080a8a25e6d6ffe7fbbb037118e0ac29f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://29acb771444281287208f7c5aecfe78ed6e70fe2940be31d423c011878bee6c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"re
startCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c37c77ac89f0469ff68b3d5dffc2af7d7b6997b8975abe244e302baf0f5bbacc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ed65ad0463b5aa72d13756891af20691bff705a94c67a55d45f71af2a88b01e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2f4c96d7281eda14ad3971ee27f9f0f401d12ba8b72e32b65bd9d4935182f980\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2f4c96d7281eda14ad3971ee27f9f0f401d12ba8b72e32b65bd9d4935182f980\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:39:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:39:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ebad6d17a869f5f58a7c43300ceace96025b58ca9621cb6bc23b45d69fe471b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state
\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ebad6d17a869f5f58a7c43300ceace96025b58ca9621cb6bc23b45d69fe471b4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:39:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:39:42Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://6075bb148352742b787b1e8df58fe76d06db89303a7c297d07446148532e92cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6075bb148352742b787b1e8df58fe76d06db89303a7c297d07446148532e92cb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T10:39:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T10:39:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:39:40Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:41:20Z is after 2025-08-24T17:21:41Z" Nov 22 10:41:20 crc kubenswrapper[4926]: I1122 10:41:20.790483 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://136423f9eeb872be0b685dcfee2a81e5f4133ad09607dc53fdfa64d70e87ceab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:41:20Z is after 2025-08-24T17:21:41Z" Nov 22 10:41:20 crc kubenswrapper[4926]: I1122 10:41:20.806638 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-c6w2q" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"36de2843-6491-4c54-b624-c4a3d328c164\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://954a6a1daad88733da68073d0f235894eb5931360545e07bc25ca2cbedf0efae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://13dff3bd18b5adc45fb92f8207a6726011752f430271dabbae9f2e94011f1b08\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-22T10:40:48Z\\\",\\\"message\\\":\\\"2025-11-22T10:40:02+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_9f86c420-ab6e-4737-b325-6c2a7026407f\\\\n2025-11-22T10:40:02+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_9f86c420-ab6e-4737-b325-6c2a7026407f to /host/opt/cni/bin/\\\\n2025-11-22T10:40:03Z [verbose] multus-daemon started\\\\n2025-11-22T10:40:03Z [verbose] Readiness Indicator file check\\\\n2025-11-22T10:40:48Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-52vzl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:00Z\\\"}}\" for pod \"openshift-multus\"/\"multus-c6w2q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:41:20Z is after 2025-08-24T17:21:41Z" Nov 22 10:41:20 crc kubenswrapper[4926]: I1122 10:41:20.825017 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2bff8ee1644ff7cd9bee279e5c641b5d9c55e0546fbc7d1f2330e4e86aee0c37\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:41:20Z is after 2025-08-24T17:21:41Z" Nov 22 10:41:20 crc kubenswrapper[4926]: I1122 10:41:20.835015 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:20 crc kubenswrapper[4926]: I1122 10:41:20.835053 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:20 crc kubenswrapper[4926]: I1122 10:41:20.835067 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:20 crc kubenswrapper[4926]: I1122 10:41:20.835087 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:20 crc kubenswrapper[4926]: I1122 10:41:20.835102 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:20Z","lastTransitionTime":"2025-11-22T10:41:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:41:20 crc kubenswrapper[4926]: I1122 10:41:20.839725 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:41:20Z is after 2025-08-24T17:21:41Z" Nov 22 10:41:20 crc kubenswrapper[4926]: I1122 10:41:20.852521 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:41:20Z is after 2025-08-24T17:21:41Z" Nov 22 10:41:20 crc kubenswrapper[4926]: I1122 10:41:20.863548 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-4sppr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f66b576d-83a6-4919-a918-7b075f35881e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a4a164ad2484a9c0b09c8e6b174448dedfe84a017ada69b19429d90621540080\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jwtrf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\
\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:01Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-4sppr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:41:20Z is after 2025-08-24T17:21:41Z" Nov 22 10:41:20 crc kubenswrapper[4926]: I1122 10:41:20.876941 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"19e2ae79-41cf-4653-aa92-cc410145852b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:39:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5b4679473c2c11fecbd8496e1cbeb15b3a2e3fff5e5b5f127e0d1f1d7b3e5b70\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8066b411b165bb6f3aa9fac7c4f7d5687e58fba56ff2afd14c7a351ba3877483\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://96a716433f83f8041fc5f29383937a1b49d73701286739632a5c07a7e4a42077\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506c
e0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ff433d4efb9f77a47f79a39aa609b09a40a642f554e19b8fce0c84ad96a5e34\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:39:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:39:40Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:41:20Z is after 2025-08-24T17:21:41Z" Nov 22 10:41:20 crc kubenswrapper[4926]: I1122 10:41:20.887921 4926 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-wqf9b" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"385427b5-fb45-42dd-8ff8-a4c7cdad6157\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T10:40:01Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://06207f52a9095cf1830a6ad91952c7efa774aa15a2fca873dc09b6b995e26401\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T10:40:01Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-4rrk8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T10:40:00Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-wqf9b\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:41:20Z is after 2025-08-24T17:21:41Z" Nov 22 10:41:20 crc kubenswrapper[4926]: I1122 10:41:20.937638 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:20 crc kubenswrapper[4926]: I1122 10:41:20.937670 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:20 crc kubenswrapper[4926]: I1122 10:41:20.937682 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:20 crc kubenswrapper[4926]: I1122 10:41:20.937698 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:20 crc kubenswrapper[4926]: I1122 10:41:20.937711 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:20Z","lastTransitionTime":"2025-11-22T10:41:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: 
no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:41:21 crc kubenswrapper[4926]: I1122 10:41:21.040929 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:21 crc kubenswrapper[4926]: I1122 10:41:21.040973 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:21 crc kubenswrapper[4926]: I1122 10:41:21.040988 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:21 crc kubenswrapper[4926]: I1122 10:41:21.041012 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:21 crc kubenswrapper[4926]: I1122 10:41:21.041065 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:21Z","lastTransitionTime":"2025-11-22T10:41:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:41:21 crc kubenswrapper[4926]: I1122 10:41:21.144586 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:21 crc kubenswrapper[4926]: I1122 10:41:21.144633 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:21 crc kubenswrapper[4926]: I1122 10:41:21.144644 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:21 crc kubenswrapper[4926]: I1122 10:41:21.144662 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:21 crc kubenswrapper[4926]: I1122 10:41:21.144676 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:21Z","lastTransitionTime":"2025-11-22T10:41:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:41:21 crc kubenswrapper[4926]: I1122 10:41:21.247318 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:21 crc kubenswrapper[4926]: I1122 10:41:21.247405 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:21 crc kubenswrapper[4926]: I1122 10:41:21.247431 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:21 crc kubenswrapper[4926]: I1122 10:41:21.247464 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:21 crc kubenswrapper[4926]: I1122 10:41:21.247484 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:21Z","lastTransitionTime":"2025-11-22T10:41:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:41:21 crc kubenswrapper[4926]: I1122 10:41:21.350313 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:21 crc kubenswrapper[4926]: I1122 10:41:21.350371 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:21 crc kubenswrapper[4926]: I1122 10:41:21.350389 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:21 crc kubenswrapper[4926]: I1122 10:41:21.350413 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:21 crc kubenswrapper[4926]: I1122 10:41:21.350430 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:21Z","lastTransitionTime":"2025-11-22T10:41:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:41:21 crc kubenswrapper[4926]: I1122 10:41:21.453079 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:21 crc kubenswrapper[4926]: I1122 10:41:21.453137 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:21 crc kubenswrapper[4926]: I1122 10:41:21.453156 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:21 crc kubenswrapper[4926]: I1122 10:41:21.453179 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:21 crc kubenswrapper[4926]: I1122 10:41:21.453196 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:21Z","lastTransitionTime":"2025-11-22T10:41:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:41:21 crc kubenswrapper[4926]: I1122 10:41:21.556393 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:21 crc kubenswrapper[4926]: I1122 10:41:21.556444 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:21 crc kubenswrapper[4926]: I1122 10:41:21.556459 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:21 crc kubenswrapper[4926]: I1122 10:41:21.556490 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:21 crc kubenswrapper[4926]: I1122 10:41:21.556507 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:21Z","lastTransitionTime":"2025-11-22T10:41:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:41:21 crc kubenswrapper[4926]: I1122 10:41:21.581341 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 22 10:41:21 crc kubenswrapper[4926]: E1122 10:41:21.581527 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 22 10:41:21 crc kubenswrapper[4926]: I1122 10:41:21.659827 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:21 crc kubenswrapper[4926]: I1122 10:41:21.659876 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:21 crc kubenswrapper[4926]: I1122 10:41:21.659910 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:21 crc kubenswrapper[4926]: I1122 10:41:21.659926 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:21 crc kubenswrapper[4926]: I1122 10:41:21.659937 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:21Z","lastTransitionTime":"2025-11-22T10:41:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:41:21 crc kubenswrapper[4926]: I1122 10:41:21.762859 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:21 crc kubenswrapper[4926]: I1122 10:41:21.762944 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:21 crc kubenswrapper[4926]: I1122 10:41:21.762961 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:21 crc kubenswrapper[4926]: I1122 10:41:21.762984 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:21 crc kubenswrapper[4926]: I1122 10:41:21.763001 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:21Z","lastTransitionTime":"2025-11-22T10:41:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:41:21 crc kubenswrapper[4926]: I1122 10:41:21.867137 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:21 crc kubenswrapper[4926]: I1122 10:41:21.867214 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:21 crc kubenswrapper[4926]: I1122 10:41:21.867230 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:21 crc kubenswrapper[4926]: I1122 10:41:21.867255 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:21 crc kubenswrapper[4926]: I1122 10:41:21.867271 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:21Z","lastTransitionTime":"2025-11-22T10:41:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:41:21 crc kubenswrapper[4926]: I1122 10:41:21.970315 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:21 crc kubenswrapper[4926]: I1122 10:41:21.970375 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:21 crc kubenswrapper[4926]: I1122 10:41:21.970385 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:21 crc kubenswrapper[4926]: I1122 10:41:21.970407 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:21 crc kubenswrapper[4926]: I1122 10:41:21.970421 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:21Z","lastTransitionTime":"2025-11-22T10:41:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:41:22 crc kubenswrapper[4926]: I1122 10:41:22.073749 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:22 crc kubenswrapper[4926]: I1122 10:41:22.073824 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:22 crc kubenswrapper[4926]: I1122 10:41:22.073842 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:22 crc kubenswrapper[4926]: I1122 10:41:22.073867 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:22 crc kubenswrapper[4926]: I1122 10:41:22.073885 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:22Z","lastTransitionTime":"2025-11-22T10:41:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:41:22 crc kubenswrapper[4926]: I1122 10:41:22.177218 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:22 crc kubenswrapper[4926]: I1122 10:41:22.177278 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:22 crc kubenswrapper[4926]: I1122 10:41:22.177301 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:22 crc kubenswrapper[4926]: I1122 10:41:22.177330 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:22 crc kubenswrapper[4926]: I1122 10:41:22.177353 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:22Z","lastTransitionTime":"2025-11-22T10:41:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:41:22 crc kubenswrapper[4926]: I1122 10:41:22.280093 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:22 crc kubenswrapper[4926]: I1122 10:41:22.280227 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:22 crc kubenswrapper[4926]: I1122 10:41:22.280250 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:22 crc kubenswrapper[4926]: I1122 10:41:22.280275 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:22 crc kubenswrapper[4926]: I1122 10:41:22.280330 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:22Z","lastTransitionTime":"2025-11-22T10:41:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:41:22 crc kubenswrapper[4926]: I1122 10:41:22.314379 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:22 crc kubenswrapper[4926]: I1122 10:41:22.314472 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:22 crc kubenswrapper[4926]: I1122 10:41:22.314498 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:22 crc kubenswrapper[4926]: I1122 10:41:22.314527 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:22 crc kubenswrapper[4926]: I1122 10:41:22.314550 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:22Z","lastTransitionTime":"2025-11-22T10:41:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:41:22 crc kubenswrapper[4926]: E1122 10:41:22.336279 4926 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404548Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865348Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:41:22Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:41:22Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:41:22Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:41:22Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:41:22Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:41:22Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:41:22Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:41:22Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"aeb560ef-1eb5-4732-98e3-250b723cbd1b\\\",\\\"systemUUID\\\":\\\"16dcb71b-1e1f-4d77-bb74-17aa213c9052\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:41:22Z is after 
2025-08-24T17:21:41Z" Nov 22 10:41:22 crc kubenswrapper[4926]: I1122 10:41:22.342152 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:22 crc kubenswrapper[4926]: I1122 10:41:22.342201 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:22 crc kubenswrapper[4926]: I1122 10:41:22.342220 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:22 crc kubenswrapper[4926]: I1122 10:41:22.342243 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:22 crc kubenswrapper[4926]: I1122 10:41:22.342261 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:22Z","lastTransitionTime":"2025-11-22T10:41:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:41:22 crc kubenswrapper[4926]: E1122 10:41:22.365396 4926 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404548Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865348Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:41:22Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:41:22Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:41:22Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:41:22Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:41:22Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:41:22Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:41:22Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:41:22Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"aeb560ef-1eb5-4732-98e3-250b723cbd1b\\\",\\\"systemUUID\\\":\\\"16dcb71b-1e1f-4d77-bb74-17aa213c9052\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:41:22Z is after 
2025-08-24T17:21:41Z" Nov 22 10:41:22 crc kubenswrapper[4926]: I1122 10:41:22.370805 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:22 crc kubenswrapper[4926]: I1122 10:41:22.370942 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:22 crc kubenswrapper[4926]: I1122 10:41:22.370963 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:22 crc kubenswrapper[4926]: I1122 10:41:22.370984 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:22 crc kubenswrapper[4926]: I1122 10:41:22.371002 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:22Z","lastTransitionTime":"2025-11-22T10:41:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:41:22 crc kubenswrapper[4926]: E1122 10:41:22.391838 4926 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404548Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865348Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:41:22Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:41:22Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:41:22Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:41:22Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:41:22Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:41:22Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:41:22Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:41:22Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"aeb560ef-1eb5-4732-98e3-250b723cbd1b\\\",\\\"systemUUID\\\":\\\"16dcb71b-1e1f-4d77-bb74-17aa213c9052\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:41:22Z is after 
2025-08-24T17:21:41Z" Nov 22 10:41:22 crc kubenswrapper[4926]: I1122 10:41:22.397160 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:22 crc kubenswrapper[4926]: I1122 10:41:22.397244 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:22 crc kubenswrapper[4926]: I1122 10:41:22.397272 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:22 crc kubenswrapper[4926]: I1122 10:41:22.397304 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:22 crc kubenswrapper[4926]: I1122 10:41:22.397325 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:22Z","lastTransitionTime":"2025-11-22T10:41:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:41:22 crc kubenswrapper[4926]: E1122 10:41:22.415109 4926 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404548Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865348Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:41:22Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:41:22Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:41:22Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:41:22Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:41:22Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:41:22Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:41:22Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:41:22Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"aeb560ef-1eb5-4732-98e3-250b723cbd1b\\\",\\\"systemUUID\\\":\\\"16dcb71b-1e1f-4d77-bb74-17aa213c9052\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:41:22Z is after 
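For readability: the status patch the kubelet keeps trying to send (escaped in the entries above, and elided in the retried attempts since the payload repeats byte for byte) is a strategic-merge patch of roughly this shape once unescaped and trimmed; the $setElementOrder/conditions directive tells the apiserver how to order the merged conditions list. Every value here is copied from the payload above, with the long conditions and images arrays abbreviated:

    {
      "status": {
        "$setElementOrder/conditions": [
          {"type": "MemoryPressure"}, {"type": "DiskPressure"},
          {"type": "PIDPressure"}, {"type": "Ready"}
        ],
        "allocatable": {"cpu": "11800m", "ephemeral-storage": "76396645454", "memory": "32404548Ki"},
        "capacity": {"cpu": "12", "ephemeral-storage": "83293888Ki", "memory": "32865348Ki"},
        "conditions": [ "... the four conditions above, with refreshed heartbeat timestamps ..." ],
        "images": [ "... the node's cached image list, names plus sizeBytes ..." ],
        "nodeInfo": {"bootID": "aeb560ef-1eb5-4732-98e3-250b723cbd1b", "systemUUID": "16dcb71b-1e1f-4d77-bb74-17aa213c9052"}
      }
    }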
2025-08-24T17:21:41Z" Nov 22 10:41:22 crc kubenswrapper[4926]: I1122 10:41:22.420385 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:22 crc kubenswrapper[4926]: I1122 10:41:22.420440 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:22 crc kubenswrapper[4926]: I1122 10:41:22.420452 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:22 crc kubenswrapper[4926]: I1122 10:41:22.420469 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:22 crc kubenswrapper[4926]: I1122 10:41:22.420481 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:22Z","lastTransitionTime":"2025-11-22T10:41:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:41:22 crc kubenswrapper[4926]: E1122 10:41:22.437232 4926 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404548Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865348Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:41:22Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:41:22Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:41:22Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:41:22Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:41:22Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:41:22Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T10:41:22Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T10:41:22Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"aeb560ef-1eb5-4732-98e3-250b723cbd1b\\\",\\\"systemUUID\\\":\\\"16dcb71b-1e1f-4d77-bb74-17aa213c9052\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T10:41:22Z is after 
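Every retry dies at the same spot: the patch never reaches the API object because the node.network-node-identity.openshift.io admission webhook at https://127.0.0.1:9743 is serving a certificate that expired on 2025-08-24, months before the node's clock time of 2025-11-22. After a fixed number of attempts (nodeStatusUpdateRetry, 5 in current kubelet sources) the kubelet gives up, which is the "exceeds retry count" entry that follows. A minimal Go sketch, not part of the log, that dials the endpoint from the error text and prints the offending certificate's validity window:

    // checkcert.go: illustrative only. Dial the webhook endpoint named in the
    // error above and print the serving certificate's validity window.
    // InsecureSkipVerify is used because the certificate is expired and we
    // only want to inspect it, not trust it.
    package main

    import (
        "crypto/tls"
        "fmt"
        "log"
        "time"
    )

    func main() {
        conn, err := tls.Dial("tcp", "127.0.0.1:9743", &tls.Config{InsecureSkipVerify: true})
        if err != nil {
            log.Fatal(err)
        }
        defer conn.Close()
        cert := conn.ConnectionState().PeerCertificates[0]
        fmt.Println("notBefore:", cert.NotBefore.Format(time.RFC3339))
        fmt.Println("notAfter: ", cert.NotAfter.Format(time.RFC3339))
        if time.Now().After(cert.NotAfter) {
            fmt.Println("expired; matches the x509 error in the log")
        }
    }

Renewing the cluster certificates (for a CRC instance, typically by letting the cluster's own certificate rotation run with a correct system clock) clears this failure mode.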
2025-08-24T17:21:41Z" Nov 22 10:41:22 crc kubenswrapper[4926]: E1122 10:41:22.437404 4926 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 22 10:41:22 crc kubenswrapper[4926]: I1122 10:41:22.439930 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:22 crc kubenswrapper[4926]: I1122 10:41:22.439994 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:22 crc kubenswrapper[4926]: I1122 10:41:22.440011 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:22 crc kubenswrapper[4926]: I1122 10:41:22.440034 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:22 crc kubenswrapper[4926]: I1122 10:41:22.440053 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:22Z","lastTransitionTime":"2025-11-22T10:41:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:41:22 crc kubenswrapper[4926]: I1122 10:41:22.542660 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:22 crc kubenswrapper[4926]: I1122 10:41:22.542730 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:22 crc kubenswrapper[4926]: I1122 10:41:22.542750 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:22 crc kubenswrapper[4926]: I1122 10:41:22.542780 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:22 crc kubenswrapper[4926]: I1122 10:41:22.542800 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:22Z","lastTransitionTime":"2025-11-22T10:41:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:41:22 crc kubenswrapper[4926]: I1122 10:41:22.581112 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-jfbf4" Nov 22 10:41:22 crc kubenswrapper[4926]: I1122 10:41:22.581178 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 10:41:22 crc kubenswrapper[4926]: E1122 10:41:22.581260 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-jfbf4" podUID="c42b6f47-b1a4-4fee-8681-3b5288370323" Nov 22 10:41:22 crc kubenswrapper[4926]: I1122 10:41:22.581198 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 22 10:41:22 crc kubenswrapper[4926]: E1122 10:41:22.581331 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 22 10:41:22 crc kubenswrapper[4926]: E1122 10:41:22.581491 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 22 10:41:22 crc kubenswrapper[4926]: I1122 10:41:22.646137 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:22 crc kubenswrapper[4926]: I1122 10:41:22.646183 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:22 crc kubenswrapper[4926]: I1122 10:41:22.646196 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:22 crc kubenswrapper[4926]: I1122 10:41:22.646213 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:22 crc kubenswrapper[4926]: I1122 10:41:22.646227 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:22Z","lastTransitionTime":"2025-11-22T10:41:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:41:22 crc kubenswrapper[4926]: I1122 10:41:22.749578 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:22 crc kubenswrapper[4926]: I1122 10:41:22.749631 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:22 crc kubenswrapper[4926]: I1122 10:41:22.749648 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:22 crc kubenswrapper[4926]: I1122 10:41:22.749671 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:22 crc kubenswrapper[4926]: I1122 10:41:22.749692 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:22Z","lastTransitionTime":"2025-11-22T10:41:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:41:22 crc kubenswrapper[4926]: I1122 10:41:22.853217 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:22 crc kubenswrapper[4926]: I1122 10:41:22.853281 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:22 crc kubenswrapper[4926]: I1122 10:41:22.853297 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:22 crc kubenswrapper[4926]: I1122 10:41:22.853323 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:22 crc kubenswrapper[4926]: I1122 10:41:22.853339 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:22Z","lastTransitionTime":"2025-11-22T10:41:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:41:22 crc kubenswrapper[4926]: I1122 10:41:22.955465 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:22 crc kubenswrapper[4926]: I1122 10:41:22.955532 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:22 crc kubenswrapper[4926]: I1122 10:41:22.955541 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:22 crc kubenswrapper[4926]: I1122 10:41:22.955556 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:22 crc kubenswrapper[4926]: I1122 10:41:22.955565 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:22Z","lastTransitionTime":"2025-11-22T10:41:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:41:23 crc kubenswrapper[4926]: I1122 10:41:23.059401 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:23 crc kubenswrapper[4926]: I1122 10:41:23.059455 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:23 crc kubenswrapper[4926]: I1122 10:41:23.059473 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:23 crc kubenswrapper[4926]: I1122 10:41:23.059498 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:23 crc kubenswrapper[4926]: I1122 10:41:23.059516 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:23Z","lastTransitionTime":"2025-11-22T10:41:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:41:23 crc kubenswrapper[4926]: I1122 10:41:23.162353 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:23 crc kubenswrapper[4926]: I1122 10:41:23.162425 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:23 crc kubenswrapper[4926]: I1122 10:41:23.162447 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:23 crc kubenswrapper[4926]: I1122 10:41:23.162475 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:23 crc kubenswrapper[4926]: I1122 10:41:23.162498 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:23Z","lastTransitionTime":"2025-11-22T10:41:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:41:23 crc kubenswrapper[4926]: I1122 10:41:23.265386 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:23 crc kubenswrapper[4926]: I1122 10:41:23.265463 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:23 crc kubenswrapper[4926]: I1122 10:41:23.265486 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:23 crc kubenswrapper[4926]: I1122 10:41:23.265510 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:23 crc kubenswrapper[4926]: I1122 10:41:23.265525 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:23Z","lastTransitionTime":"2025-11-22T10:41:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:41:23 crc kubenswrapper[4926]: I1122 10:41:23.368309 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:23 crc kubenswrapper[4926]: I1122 10:41:23.368375 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:23 crc kubenswrapper[4926]: I1122 10:41:23.368389 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:23 crc kubenswrapper[4926]: I1122 10:41:23.368406 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:23 crc kubenswrapper[4926]: I1122 10:41:23.368417 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:23Z","lastTransitionTime":"2025-11-22T10:41:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:41:23 crc kubenswrapper[4926]: I1122 10:41:23.471838 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:23 crc kubenswrapper[4926]: I1122 10:41:23.471913 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:23 crc kubenswrapper[4926]: I1122 10:41:23.471924 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:23 crc kubenswrapper[4926]: I1122 10:41:23.471942 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:23 crc kubenswrapper[4926]: I1122 10:41:23.471955 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:23Z","lastTransitionTime":"2025-11-22T10:41:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:41:23 crc kubenswrapper[4926]: I1122 10:41:23.575619 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:23 crc kubenswrapper[4926]: I1122 10:41:23.575680 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:23 crc kubenswrapper[4926]: I1122 10:41:23.575698 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:23 crc kubenswrapper[4926]: I1122 10:41:23.575721 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:23 crc kubenswrapper[4926]: I1122 10:41:23.575737 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:23Z","lastTransitionTime":"2025-11-22T10:41:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:41:23 crc kubenswrapper[4926]: I1122 10:41:23.581333 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 22 10:41:23 crc kubenswrapper[4926]: E1122 10:41:23.581559 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
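The NotReady heartbeats and the per-pod sync failures above all trace back to the same cause: nothing has written a network config into /etc/kubernetes/cni/net.d/ yet. On this cluster that file normally comes from the OVN-Kubernetes node agent (the ovnkube-node-z69nr pod whose ovnkube-controller container is crash-looping below), so the kubelet keeps reporting the runtime network as not ready. For illustration only, a CNI config of the general shape the kubelet scans that directory for; the name and type here are placeholders, not the file ovnkube actually writes:

    {
      "cniVersion": "0.4.0",
      "name": "example-network",
      "type": "example-cni-plugin"
    }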
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 22 10:41:23 crc kubenswrapper[4926]: I1122 10:41:23.678337 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:23 crc kubenswrapper[4926]: I1122 10:41:23.678403 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:23 crc kubenswrapper[4926]: I1122 10:41:23.678417 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:23 crc kubenswrapper[4926]: I1122 10:41:23.678440 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:23 crc kubenswrapper[4926]: I1122 10:41:23.678457 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:23Z","lastTransitionTime":"2025-11-22T10:41:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:41:23 crc kubenswrapper[4926]: I1122 10:41:23.781929 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:23 crc kubenswrapper[4926]: I1122 10:41:23.781994 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:23 crc kubenswrapper[4926]: I1122 10:41:23.782011 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:23 crc kubenswrapper[4926]: I1122 10:41:23.782039 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:23 crc kubenswrapper[4926]: I1122 10:41:23.782060 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:23Z","lastTransitionTime":"2025-11-22T10:41:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:41:23 crc kubenswrapper[4926]: I1122 10:41:23.885073 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:23 crc kubenswrapper[4926]: I1122 10:41:23.885138 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:23 crc kubenswrapper[4926]: I1122 10:41:23.885159 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:23 crc kubenswrapper[4926]: I1122 10:41:23.885184 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:23 crc kubenswrapper[4926]: I1122 10:41:23.885204 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:23Z","lastTransitionTime":"2025-11-22T10:41:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:41:23 crc kubenswrapper[4926]: I1122 10:41:23.988010 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:23 crc kubenswrapper[4926]: I1122 10:41:23.988076 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:23 crc kubenswrapper[4926]: I1122 10:41:23.988084 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:23 crc kubenswrapper[4926]: I1122 10:41:23.988100 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:23 crc kubenswrapper[4926]: I1122 10:41:23.988109 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:23Z","lastTransitionTime":"2025-11-22T10:41:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:41:24 crc kubenswrapper[4926]: I1122 10:41:24.091380 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:24 crc kubenswrapper[4926]: I1122 10:41:24.091436 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:24 crc kubenswrapper[4926]: I1122 10:41:24.091452 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:24 crc kubenswrapper[4926]: I1122 10:41:24.091475 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:24 crc kubenswrapper[4926]: I1122 10:41:24.091491 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:24Z","lastTransitionTime":"2025-11-22T10:41:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:41:24 crc kubenswrapper[4926]: I1122 10:41:24.194117 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:24 crc kubenswrapper[4926]: I1122 10:41:24.194175 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:24 crc kubenswrapper[4926]: I1122 10:41:24.194191 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:24 crc kubenswrapper[4926]: I1122 10:41:24.194213 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:24 crc kubenswrapper[4926]: I1122 10:41:24.194232 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:24Z","lastTransitionTime":"2025-11-22T10:41:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:41:24 crc kubenswrapper[4926]: I1122 10:41:24.297599 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:24 crc kubenswrapper[4926]: I1122 10:41:24.297653 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:24 crc kubenswrapper[4926]: I1122 10:41:24.297670 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:24 crc kubenswrapper[4926]: I1122 10:41:24.297693 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:24 crc kubenswrapper[4926]: I1122 10:41:24.297710 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:24Z","lastTransitionTime":"2025-11-22T10:41:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:41:24 crc kubenswrapper[4926]: I1122 10:41:24.400945 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:24 crc kubenswrapper[4926]: I1122 10:41:24.401031 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:24 crc kubenswrapper[4926]: I1122 10:41:24.401066 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:24 crc kubenswrapper[4926]: I1122 10:41:24.401094 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:24 crc kubenswrapper[4926]: I1122 10:41:24.401114 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:24Z","lastTransitionTime":"2025-11-22T10:41:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:41:24 crc kubenswrapper[4926]: I1122 10:41:24.504085 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:24 crc kubenswrapper[4926]: I1122 10:41:24.504406 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:24 crc kubenswrapper[4926]: I1122 10:41:24.504504 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:24 crc kubenswrapper[4926]: I1122 10:41:24.504589 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:24 crc kubenswrapper[4926]: I1122 10:41:24.504666 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:24Z","lastTransitionTime":"2025-11-22T10:41:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:41:24 crc kubenswrapper[4926]: I1122 10:41:24.581826 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 10:41:24 crc kubenswrapper[4926]: E1122 10:41:24.582017 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 22 10:41:24 crc kubenswrapper[4926]: I1122 10:41:24.582064 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-jfbf4" Nov 22 10:41:24 crc kubenswrapper[4926]: E1122 10:41:24.582594 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-jfbf4" podUID="c42b6f47-b1a4-4fee-8681-3b5288370323" Nov 22 10:41:24 crc kubenswrapper[4926]: I1122 10:41:24.582871 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 22 10:41:24 crc kubenswrapper[4926]: I1122 10:41:24.583096 4926 scope.go:117] "RemoveContainer" containerID="b2e1039227d0efdfdc18a3ec2d9bac78d747c80c3ae8c0a78b3cef27bae4245f" Nov 22 10:41:24 crc kubenswrapper[4926]: E1122 10:41:24.583572 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-z69nr_openshift-ovn-kubernetes(25bc94bb-a5d1-431c-9847-2f6a02997e25)\"" pod="openshift-ovn-kubernetes/ovnkube-node-z69nr" podUID="25bc94bb-a5d1-431c-9847-2f6a02997e25" Nov 22 10:41:24 crc kubenswrapper[4926]: E1122 10:41:24.583986 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 22 10:41:24 crc kubenswrapper[4926]: I1122 10:41:24.608136 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:24 crc kubenswrapper[4926]: I1122 10:41:24.608187 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:24 crc kubenswrapper[4926]: I1122 10:41:24.608206 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:24 crc kubenswrapper[4926]: I1122 10:41:24.608227 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:24 crc kubenswrapper[4926]: I1122 10:41:24.608245 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:24Z","lastTransitionTime":"2025-11-22T10:41:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:41:24 crc kubenswrapper[4926]: I1122 10:41:24.711758 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:24 crc kubenswrapper[4926]: I1122 10:41:24.711824 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:24 crc kubenswrapper[4926]: I1122 10:41:24.711850 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:24 crc kubenswrapper[4926]: I1122 10:41:24.711879 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:24 crc kubenswrapper[4926]: I1122 10:41:24.711936 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:24Z","lastTransitionTime":"2025-11-22T10:41:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:41:24 crc kubenswrapper[4926]: I1122 10:41:24.814846 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:24 crc kubenswrapper[4926]: I1122 10:41:24.814943 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:24 crc kubenswrapper[4926]: I1122 10:41:24.814966 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:24 crc kubenswrapper[4926]: I1122 10:41:24.814995 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:24 crc kubenswrapper[4926]: I1122 10:41:24.815018 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:24Z","lastTransitionTime":"2025-11-22T10:41:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:41:24 crc kubenswrapper[4926]: I1122 10:41:24.918449 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:24 crc kubenswrapper[4926]: I1122 10:41:24.918506 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:24 crc kubenswrapper[4926]: I1122 10:41:24.918527 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:24 crc kubenswrapper[4926]: I1122 10:41:24.918554 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:24 crc kubenswrapper[4926]: I1122 10:41:24.918578 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:24Z","lastTransitionTime":"2025-11-22T10:41:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:41:25 crc kubenswrapper[4926]: I1122 10:41:25.020755 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:25 crc kubenswrapper[4926]: I1122 10:41:25.020790 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:25 crc kubenswrapper[4926]: I1122 10:41:25.020799 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:25 crc kubenswrapper[4926]: I1122 10:41:25.020812 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:25 crc kubenswrapper[4926]: I1122 10:41:25.020821 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:25Z","lastTransitionTime":"2025-11-22T10:41:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:41:25 crc kubenswrapper[4926]: I1122 10:41:25.123858 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:25 crc kubenswrapper[4926]: I1122 10:41:25.123969 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:25 crc kubenswrapper[4926]: I1122 10:41:25.123987 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:25 crc kubenswrapper[4926]: I1122 10:41:25.124013 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:25 crc kubenswrapper[4926]: I1122 10:41:25.124031 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:25Z","lastTransitionTime":"2025-11-22T10:41:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:41:25 crc kubenswrapper[4926]: I1122 10:41:25.227529 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:25 crc kubenswrapper[4926]: I1122 10:41:25.227587 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:25 crc kubenswrapper[4926]: I1122 10:41:25.227605 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:25 crc kubenswrapper[4926]: I1122 10:41:25.227627 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:25 crc kubenswrapper[4926]: I1122 10:41:25.227645 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:25Z","lastTransitionTime":"2025-11-22T10:41:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:41:25 crc kubenswrapper[4926]: I1122 10:41:25.330032 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:25 crc kubenswrapper[4926]: I1122 10:41:25.330088 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:25 crc kubenswrapper[4926]: I1122 10:41:25.330106 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:25 crc kubenswrapper[4926]: I1122 10:41:25.330130 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:25 crc kubenswrapper[4926]: I1122 10:41:25.330147 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:25Z","lastTransitionTime":"2025-11-22T10:41:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:41:25 crc kubenswrapper[4926]: I1122 10:41:25.433445 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:25 crc kubenswrapper[4926]: I1122 10:41:25.433503 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:25 crc kubenswrapper[4926]: I1122 10:41:25.433525 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:25 crc kubenswrapper[4926]: I1122 10:41:25.433556 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:25 crc kubenswrapper[4926]: I1122 10:41:25.433578 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:25Z","lastTransitionTime":"2025-11-22T10:41:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:41:25 crc kubenswrapper[4926]: I1122 10:41:25.536418 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:25 crc kubenswrapper[4926]: I1122 10:41:25.536478 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:25 crc kubenswrapper[4926]: I1122 10:41:25.536500 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:25 crc kubenswrapper[4926]: I1122 10:41:25.536528 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:25 crc kubenswrapper[4926]: I1122 10:41:25.536549 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:25Z","lastTransitionTime":"2025-11-22T10:41:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:41:25 crc kubenswrapper[4926]: I1122 10:41:25.580941 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 22 10:41:25 crc kubenswrapper[4926]: E1122 10:41:25.581099 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 22 10:41:25 crc kubenswrapper[4926]: I1122 10:41:25.639069 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:25 crc kubenswrapper[4926]: I1122 10:41:25.639123 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:25 crc kubenswrapper[4926]: I1122 10:41:25.639146 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:25 crc kubenswrapper[4926]: I1122 10:41:25.639173 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:25 crc kubenswrapper[4926]: I1122 10:41:25.639192 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:25Z","lastTransitionTime":"2025-11-22T10:41:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:41:25 crc kubenswrapper[4926]: I1122 10:41:25.742444 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:25 crc kubenswrapper[4926]: I1122 10:41:25.742797 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:25 crc kubenswrapper[4926]: I1122 10:41:25.743016 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:25 crc kubenswrapper[4926]: I1122 10:41:25.743203 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:25 crc kubenswrapper[4926]: I1122 10:41:25.743372 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:25Z","lastTransitionTime":"2025-11-22T10:41:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:41:25 crc kubenswrapper[4926]: I1122 10:41:25.846946 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:25 crc kubenswrapper[4926]: I1122 10:41:25.847017 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:25 crc kubenswrapper[4926]: I1122 10:41:25.847037 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:25 crc kubenswrapper[4926]: I1122 10:41:25.847063 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:25 crc kubenswrapper[4926]: I1122 10:41:25.847080 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:25Z","lastTransitionTime":"2025-11-22T10:41:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:41:25 crc kubenswrapper[4926]: I1122 10:41:25.950606 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:25 crc kubenswrapper[4926]: I1122 10:41:25.950658 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:25 crc kubenswrapper[4926]: I1122 10:41:25.950674 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:25 crc kubenswrapper[4926]: I1122 10:41:25.950700 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:25 crc kubenswrapper[4926]: I1122 10:41:25.950715 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:25Z","lastTransitionTime":"2025-11-22T10:41:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:41:26 crc kubenswrapper[4926]: I1122 10:41:26.054204 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:26 crc kubenswrapper[4926]: I1122 10:41:26.054317 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:26 crc kubenswrapper[4926]: I1122 10:41:26.054342 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:26 crc kubenswrapper[4926]: I1122 10:41:26.054373 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:26 crc kubenswrapper[4926]: I1122 10:41:26.054395 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:26Z","lastTransitionTime":"2025-11-22T10:41:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:41:26 crc kubenswrapper[4926]: I1122 10:41:26.157808 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:26 crc kubenswrapper[4926]: I1122 10:41:26.157975 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:26 crc kubenswrapper[4926]: I1122 10:41:26.158045 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:26 crc kubenswrapper[4926]: I1122 10:41:26.158073 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:26 crc kubenswrapper[4926]: I1122 10:41:26.158149 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:26Z","lastTransitionTime":"2025-11-22T10:41:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:41:26 crc kubenswrapper[4926]: I1122 10:41:26.261452 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:26 crc kubenswrapper[4926]: I1122 10:41:26.261511 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:26 crc kubenswrapper[4926]: I1122 10:41:26.261531 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:26 crc kubenswrapper[4926]: I1122 10:41:26.261554 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:26 crc kubenswrapper[4926]: I1122 10:41:26.261572 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:26Z","lastTransitionTime":"2025-11-22T10:41:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:41:26 crc kubenswrapper[4926]: I1122 10:41:26.364565 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:26 crc kubenswrapper[4926]: I1122 10:41:26.364619 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:26 crc kubenswrapper[4926]: I1122 10:41:26.364634 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:26 crc kubenswrapper[4926]: I1122 10:41:26.364657 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:26 crc kubenswrapper[4926]: I1122 10:41:26.364675 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:26Z","lastTransitionTime":"2025-11-22T10:41:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:41:26 crc kubenswrapper[4926]: I1122 10:41:26.467938 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:26 crc kubenswrapper[4926]: I1122 10:41:26.467996 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:26 crc kubenswrapper[4926]: I1122 10:41:26.468013 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:26 crc kubenswrapper[4926]: I1122 10:41:26.468040 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:26 crc kubenswrapper[4926]: I1122 10:41:26.468058 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:26Z","lastTransitionTime":"2025-11-22T10:41:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:41:26 crc kubenswrapper[4926]: I1122 10:41:26.571608 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:26 crc kubenswrapper[4926]: I1122 10:41:26.571674 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:26 crc kubenswrapper[4926]: I1122 10:41:26.571693 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:26 crc kubenswrapper[4926]: I1122 10:41:26.571717 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:26 crc kubenswrapper[4926]: I1122 10:41:26.571734 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:26Z","lastTransitionTime":"2025-11-22T10:41:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:41:26 crc kubenswrapper[4926]: I1122 10:41:26.581126 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-jfbf4" Nov 22 10:41:26 crc kubenswrapper[4926]: I1122 10:41:26.581428 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 22 10:41:26 crc kubenswrapper[4926]: E1122 10:41:26.581707 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 22 10:41:26 crc kubenswrapper[4926]: I1122 10:41:26.581825 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 10:41:26 crc kubenswrapper[4926]: E1122 10:41:26.581975 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 22 10:41:26 crc kubenswrapper[4926]: E1122 10:41:26.582082 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-jfbf4" podUID="c42b6f47-b1a4-4fee-8681-3b5288370323" Nov 22 10:41:26 crc kubenswrapper[4926]: I1122 10:41:26.674577 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:26 crc kubenswrapper[4926]: I1122 10:41:26.674661 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:26 crc kubenswrapper[4926]: I1122 10:41:26.674682 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:26 crc kubenswrapper[4926]: I1122 10:41:26.674705 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:26 crc kubenswrapper[4926]: I1122 10:41:26.674766 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:26Z","lastTransitionTime":"2025-11-22T10:41:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:41:26 crc kubenswrapper[4926]: I1122 10:41:26.777815 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:26 crc kubenswrapper[4926]: I1122 10:41:26.777918 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:26 crc kubenswrapper[4926]: I1122 10:41:26.777944 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:26 crc kubenswrapper[4926]: I1122 10:41:26.777973 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:26 crc kubenswrapper[4926]: I1122 10:41:26.777995 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:26Z","lastTransitionTime":"2025-11-22T10:41:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:41:26 crc kubenswrapper[4926]: I1122 10:41:26.880262 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:26 crc kubenswrapper[4926]: I1122 10:41:26.880327 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:26 crc kubenswrapper[4926]: I1122 10:41:26.880343 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:26 crc kubenswrapper[4926]: I1122 10:41:26.880366 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:26 crc kubenswrapper[4926]: I1122 10:41:26.880385 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:26Z","lastTransitionTime":"2025-11-22T10:41:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:41:26 crc kubenswrapper[4926]: I1122 10:41:26.983227 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:26 crc kubenswrapper[4926]: I1122 10:41:26.983281 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:26 crc kubenswrapper[4926]: I1122 10:41:26.983292 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:26 crc kubenswrapper[4926]: I1122 10:41:26.983310 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:26 crc kubenswrapper[4926]: I1122 10:41:26.983323 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:26Z","lastTransitionTime":"2025-11-22T10:41:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:41:27 crc kubenswrapper[4926]: I1122 10:41:27.086728 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:27 crc kubenswrapper[4926]: I1122 10:41:27.086792 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:27 crc kubenswrapper[4926]: I1122 10:41:27.086811 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:27 crc kubenswrapper[4926]: I1122 10:41:27.086836 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:27 crc kubenswrapper[4926]: I1122 10:41:27.086853 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:27Z","lastTransitionTime":"2025-11-22T10:41:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:41:27 crc kubenswrapper[4926]: I1122 10:41:27.190082 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:27 crc kubenswrapper[4926]: I1122 10:41:27.190157 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:27 crc kubenswrapper[4926]: I1122 10:41:27.190179 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:27 crc kubenswrapper[4926]: I1122 10:41:27.190210 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:27 crc kubenswrapper[4926]: I1122 10:41:27.190231 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:27Z","lastTransitionTime":"2025-11-22T10:41:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:41:27 crc kubenswrapper[4926]: I1122 10:41:27.292407 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:27 crc kubenswrapper[4926]: I1122 10:41:27.292449 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:27 crc kubenswrapper[4926]: I1122 10:41:27.292465 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:27 crc kubenswrapper[4926]: I1122 10:41:27.292487 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:27 crc kubenswrapper[4926]: I1122 10:41:27.292505 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:27Z","lastTransitionTime":"2025-11-22T10:41:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:41:27 crc kubenswrapper[4926]: I1122 10:41:27.402202 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:27 crc kubenswrapper[4926]: I1122 10:41:27.402339 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:27 crc kubenswrapper[4926]: I1122 10:41:27.402358 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:27 crc kubenswrapper[4926]: I1122 10:41:27.402383 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:27 crc kubenswrapper[4926]: I1122 10:41:27.402400 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:27Z","lastTransitionTime":"2025-11-22T10:41:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:41:27 crc kubenswrapper[4926]: I1122 10:41:27.505117 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:27 crc kubenswrapper[4926]: I1122 10:41:27.505271 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:27 crc kubenswrapper[4926]: I1122 10:41:27.505296 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:27 crc kubenswrapper[4926]: I1122 10:41:27.505327 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:27 crc kubenswrapper[4926]: I1122 10:41:27.505365 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:27Z","lastTransitionTime":"2025-11-22T10:41:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:41:27 crc kubenswrapper[4926]: I1122 10:41:27.581324 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 22 10:41:27 crc kubenswrapper[4926]: E1122 10:41:27.581539 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 22 10:41:27 crc kubenswrapper[4926]: I1122 10:41:27.609441 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:27 crc kubenswrapper[4926]: I1122 10:41:27.609494 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:27 crc kubenswrapper[4926]: I1122 10:41:27.609512 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:27 crc kubenswrapper[4926]: I1122 10:41:27.609535 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:27 crc kubenswrapper[4926]: I1122 10:41:27.609557 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:27Z","lastTransitionTime":"2025-11-22T10:41:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:41:27 crc kubenswrapper[4926]: I1122 10:41:27.712123 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:27 crc kubenswrapper[4926]: I1122 10:41:27.712186 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:27 crc kubenswrapper[4926]: I1122 10:41:27.712204 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:27 crc kubenswrapper[4926]: I1122 10:41:27.712230 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:27 crc kubenswrapper[4926]: I1122 10:41:27.712253 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:27Z","lastTransitionTime":"2025-11-22T10:41:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:41:27 crc kubenswrapper[4926]: I1122 10:41:27.816015 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:27 crc kubenswrapper[4926]: I1122 10:41:27.816081 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:27 crc kubenswrapper[4926]: I1122 10:41:27.816100 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:27 crc kubenswrapper[4926]: I1122 10:41:27.816126 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:27 crc kubenswrapper[4926]: I1122 10:41:27.816146 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:27Z","lastTransitionTime":"2025-11-22T10:41:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:41:27 crc kubenswrapper[4926]: I1122 10:41:27.918991 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:27 crc kubenswrapper[4926]: I1122 10:41:27.919060 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:27 crc kubenswrapper[4926]: I1122 10:41:27.919085 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:27 crc kubenswrapper[4926]: I1122 10:41:27.919114 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:27 crc kubenswrapper[4926]: I1122 10:41:27.919139 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:27Z","lastTransitionTime":"2025-11-22T10:41:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:41:28 crc kubenswrapper[4926]: I1122 10:41:28.022013 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:28 crc kubenswrapper[4926]: I1122 10:41:28.022079 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:28 crc kubenswrapper[4926]: I1122 10:41:28.022095 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:28 crc kubenswrapper[4926]: I1122 10:41:28.022119 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:28 crc kubenswrapper[4926]: I1122 10:41:28.022138 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:28Z","lastTransitionTime":"2025-11-22T10:41:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:41:28 crc kubenswrapper[4926]: I1122 10:41:28.125674 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:28 crc kubenswrapper[4926]: I1122 10:41:28.125765 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:28 crc kubenswrapper[4926]: I1122 10:41:28.125789 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:28 crc kubenswrapper[4926]: I1122 10:41:28.125818 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:28 crc kubenswrapper[4926]: I1122 10:41:28.125841 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:28Z","lastTransitionTime":"2025-11-22T10:41:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:41:28 crc kubenswrapper[4926]: I1122 10:41:28.228694 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:28 crc kubenswrapper[4926]: I1122 10:41:28.228756 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:28 crc kubenswrapper[4926]: I1122 10:41:28.228774 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:28 crc kubenswrapper[4926]: I1122 10:41:28.228799 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:28 crc kubenswrapper[4926]: I1122 10:41:28.228815 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:28Z","lastTransitionTime":"2025-11-22T10:41:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:41:28 crc kubenswrapper[4926]: I1122 10:41:28.332199 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:28 crc kubenswrapper[4926]: I1122 10:41:28.332268 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:28 crc kubenswrapper[4926]: I1122 10:41:28.332284 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:28 crc kubenswrapper[4926]: I1122 10:41:28.332310 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:28 crc kubenswrapper[4926]: I1122 10:41:28.332336 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:28Z","lastTransitionTime":"2025-11-22T10:41:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:41:28 crc kubenswrapper[4926]: I1122 10:41:28.435448 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:28 crc kubenswrapper[4926]: I1122 10:41:28.435530 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:28 crc kubenswrapper[4926]: I1122 10:41:28.435552 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:28 crc kubenswrapper[4926]: I1122 10:41:28.435581 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:28 crc kubenswrapper[4926]: I1122 10:41:28.435604 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:28Z","lastTransitionTime":"2025-11-22T10:41:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:41:28 crc kubenswrapper[4926]: I1122 10:41:28.539341 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:28 crc kubenswrapper[4926]: I1122 10:41:28.539434 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:28 crc kubenswrapper[4926]: I1122 10:41:28.539459 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:28 crc kubenswrapper[4926]: I1122 10:41:28.539490 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:28 crc kubenswrapper[4926]: I1122 10:41:28.539511 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:28Z","lastTransitionTime":"2025-11-22T10:41:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:41:28 crc kubenswrapper[4926]: I1122 10:41:28.581261 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 10:41:28 crc kubenswrapper[4926]: I1122 10:41:28.581400 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-jfbf4" Nov 22 10:41:28 crc kubenswrapper[4926]: I1122 10:41:28.581406 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 22 10:41:28 crc kubenswrapper[4926]: E1122 10:41:28.581535 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 22 10:41:28 crc kubenswrapper[4926]: E1122 10:41:28.581699 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-jfbf4" podUID="c42b6f47-b1a4-4fee-8681-3b5288370323" Nov 22 10:41:28 crc kubenswrapper[4926]: E1122 10:41:28.581919 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 22 10:41:28 crc kubenswrapper[4926]: I1122 10:41:28.643167 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:28 crc kubenswrapper[4926]: I1122 10:41:28.643240 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:28 crc kubenswrapper[4926]: I1122 10:41:28.643256 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:28 crc kubenswrapper[4926]: I1122 10:41:28.643281 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:28 crc kubenswrapper[4926]: I1122 10:41:28.643300 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:28Z","lastTransitionTime":"2025-11-22T10:41:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:41:28 crc kubenswrapper[4926]: I1122 10:41:28.746445 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:28 crc kubenswrapper[4926]: I1122 10:41:28.746497 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:28 crc kubenswrapper[4926]: I1122 10:41:28.746515 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:28 crc kubenswrapper[4926]: I1122 10:41:28.746538 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:28 crc kubenswrapper[4926]: I1122 10:41:28.746556 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:28Z","lastTransitionTime":"2025-11-22T10:41:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:41:28 crc kubenswrapper[4926]: I1122 10:41:28.849600 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:28 crc kubenswrapper[4926]: I1122 10:41:28.849659 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:28 crc kubenswrapper[4926]: I1122 10:41:28.849678 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:28 crc kubenswrapper[4926]: I1122 10:41:28.849742 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:28 crc kubenswrapper[4926]: I1122 10:41:28.849765 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:28Z","lastTransitionTime":"2025-11-22T10:41:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:41:28 crc kubenswrapper[4926]: I1122 10:41:28.952972 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:28 crc kubenswrapper[4926]: I1122 10:41:28.953028 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:28 crc kubenswrapper[4926]: I1122 10:41:28.953044 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:28 crc kubenswrapper[4926]: I1122 10:41:28.953071 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:28 crc kubenswrapper[4926]: I1122 10:41:28.953090 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:28Z","lastTransitionTime":"2025-11-22T10:41:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:41:29 crc kubenswrapper[4926]: I1122 10:41:29.055863 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:29 crc kubenswrapper[4926]: I1122 10:41:29.055968 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:29 crc kubenswrapper[4926]: I1122 10:41:29.055995 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:29 crc kubenswrapper[4926]: I1122 10:41:29.056023 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:29 crc kubenswrapper[4926]: I1122 10:41:29.056043 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:29Z","lastTransitionTime":"2025-11-22T10:41:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:41:29 crc kubenswrapper[4926]: I1122 10:41:29.159054 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:29 crc kubenswrapper[4926]: I1122 10:41:29.159109 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:29 crc kubenswrapper[4926]: I1122 10:41:29.159126 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:29 crc kubenswrapper[4926]: I1122 10:41:29.159152 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:29 crc kubenswrapper[4926]: I1122 10:41:29.159168 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:29Z","lastTransitionTime":"2025-11-22T10:41:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:41:29 crc kubenswrapper[4926]: I1122 10:41:29.261872 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:29 crc kubenswrapper[4926]: I1122 10:41:29.261960 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:29 crc kubenswrapper[4926]: I1122 10:41:29.261983 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:29 crc kubenswrapper[4926]: I1122 10:41:29.262012 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:29 crc kubenswrapper[4926]: I1122 10:41:29.262034 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:29Z","lastTransitionTime":"2025-11-22T10:41:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:41:29 crc kubenswrapper[4926]: I1122 10:41:29.364939 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:29 crc kubenswrapper[4926]: I1122 10:41:29.364994 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:29 crc kubenswrapper[4926]: I1122 10:41:29.365015 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:29 crc kubenswrapper[4926]: I1122 10:41:29.365045 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:29 crc kubenswrapper[4926]: I1122 10:41:29.365066 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:29Z","lastTransitionTime":"2025-11-22T10:41:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:41:29 crc kubenswrapper[4926]: I1122 10:41:29.474803 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:29 crc kubenswrapper[4926]: I1122 10:41:29.474876 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:29 crc kubenswrapper[4926]: I1122 10:41:29.474934 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:29 crc kubenswrapper[4926]: I1122 10:41:29.474965 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:29 crc kubenswrapper[4926]: I1122 10:41:29.474988 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:29Z","lastTransitionTime":"2025-11-22T10:41:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:41:29 crc kubenswrapper[4926]: I1122 10:41:29.578268 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:29 crc kubenswrapper[4926]: I1122 10:41:29.578334 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:29 crc kubenswrapper[4926]: I1122 10:41:29.578352 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:29 crc kubenswrapper[4926]: I1122 10:41:29.578376 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:29 crc kubenswrapper[4926]: I1122 10:41:29.578392 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:29Z","lastTransitionTime":"2025-11-22T10:41:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:41:29 crc kubenswrapper[4926]: I1122 10:41:29.581846 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 22 10:41:29 crc kubenswrapper[4926]: E1122 10:41:29.582321 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 22 10:41:29 crc kubenswrapper[4926]: I1122 10:41:29.681086 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:29 crc kubenswrapper[4926]: I1122 10:41:29.681160 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:29 crc kubenswrapper[4926]: I1122 10:41:29.681183 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:29 crc kubenswrapper[4926]: I1122 10:41:29.681207 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:29 crc kubenswrapper[4926]: I1122 10:41:29.681223 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:29Z","lastTransitionTime":"2025-11-22T10:41:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:41:29 crc kubenswrapper[4926]: I1122 10:41:29.785080 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:29 crc kubenswrapper[4926]: I1122 10:41:29.785145 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:29 crc kubenswrapper[4926]: I1122 10:41:29.785162 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:29 crc kubenswrapper[4926]: I1122 10:41:29.785187 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:29 crc kubenswrapper[4926]: I1122 10:41:29.785207 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:29Z","lastTransitionTime":"2025-11-22T10:41:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:41:29 crc kubenswrapper[4926]: I1122 10:41:29.887965 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:29 crc kubenswrapper[4926]: I1122 10:41:29.888033 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:29 crc kubenswrapper[4926]: I1122 10:41:29.888051 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:29 crc kubenswrapper[4926]: I1122 10:41:29.888075 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:29 crc kubenswrapper[4926]: I1122 10:41:29.888100 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:29Z","lastTransitionTime":"2025-11-22T10:41:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:41:29 crc kubenswrapper[4926]: I1122 10:41:29.991253 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:29 crc kubenswrapper[4926]: I1122 10:41:29.991331 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:29 crc kubenswrapper[4926]: I1122 10:41:29.991354 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:29 crc kubenswrapper[4926]: I1122 10:41:29.991382 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:29 crc kubenswrapper[4926]: I1122 10:41:29.991406 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:29Z","lastTransitionTime":"2025-11-22T10:41:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:41:30 crc kubenswrapper[4926]: I1122 10:41:30.094569 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:30 crc kubenswrapper[4926]: I1122 10:41:30.094637 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:30 crc kubenswrapper[4926]: I1122 10:41:30.094658 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:30 crc kubenswrapper[4926]: I1122 10:41:30.094683 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:30 crc kubenswrapper[4926]: I1122 10:41:30.094702 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:30Z","lastTransitionTime":"2025-11-22T10:41:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:41:30 crc kubenswrapper[4926]: I1122 10:41:30.197670 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:30 crc kubenswrapper[4926]: I1122 10:41:30.197728 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:30 crc kubenswrapper[4926]: I1122 10:41:30.197744 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:30 crc kubenswrapper[4926]: I1122 10:41:30.197769 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:30 crc kubenswrapper[4926]: I1122 10:41:30.197787 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:30Z","lastTransitionTime":"2025-11-22T10:41:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:41:30 crc kubenswrapper[4926]: I1122 10:41:30.300628 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:30 crc kubenswrapper[4926]: I1122 10:41:30.300674 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:30 crc kubenswrapper[4926]: I1122 10:41:30.300684 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:30 crc kubenswrapper[4926]: I1122 10:41:30.300702 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:30 crc kubenswrapper[4926]: I1122 10:41:30.300713 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:30Z","lastTransitionTime":"2025-11-22T10:41:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:41:30 crc kubenswrapper[4926]: I1122 10:41:30.403369 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:30 crc kubenswrapper[4926]: I1122 10:41:30.403418 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:30 crc kubenswrapper[4926]: I1122 10:41:30.403434 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:30 crc kubenswrapper[4926]: I1122 10:41:30.403453 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:30 crc kubenswrapper[4926]: I1122 10:41:30.403467 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:30Z","lastTransitionTime":"2025-11-22T10:41:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:41:30 crc kubenswrapper[4926]: I1122 10:41:30.505663 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:30 crc kubenswrapper[4926]: I1122 10:41:30.505731 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:30 crc kubenswrapper[4926]: I1122 10:41:30.505747 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:30 crc kubenswrapper[4926]: I1122 10:41:30.505771 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:30 crc kubenswrapper[4926]: I1122 10:41:30.505787 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:30Z","lastTransitionTime":"2025-11-22T10:41:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:41:30 crc kubenswrapper[4926]: I1122 10:41:30.581910 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 22 10:41:30 crc kubenswrapper[4926]: I1122 10:41:30.581920 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-jfbf4" Nov 22 10:41:30 crc kubenswrapper[4926]: E1122 10:41:30.582085 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 22 10:41:30 crc kubenswrapper[4926]: E1122 10:41:30.582422 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-jfbf4" podUID="c42b6f47-b1a4-4fee-8681-3b5288370323" Nov 22 10:41:30 crc kubenswrapper[4926]: I1122 10:41:30.582107 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 10:41:30 crc kubenswrapper[4926]: E1122 10:41:30.583293 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 22 10:41:30 crc kubenswrapper[4926]: I1122 10:41:30.609313 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:30 crc kubenswrapper[4926]: I1122 10:41:30.609393 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:30 crc kubenswrapper[4926]: I1122 10:41:30.609409 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:30 crc kubenswrapper[4926]: I1122 10:41:30.609428 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:30 crc kubenswrapper[4926]: I1122 10:41:30.609443 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:30Z","lastTransitionTime":"2025-11-22T10:41:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:41:30 crc kubenswrapper[4926]: I1122 10:41:30.622734 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/multus-c6w2q" podStartSLOduration=90.622702817 podStartE2EDuration="1m30.622702817s" podCreationTimestamp="2025-11-22 10:40:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 10:41:30.622621524 +0000 UTC m=+110.924226801" watchObservedRunningTime="2025-11-22 10:41:30.622702817 +0000 UTC m=+110.924308144" Nov 22 10:41:30 crc kubenswrapper[4926]: I1122 10:41:30.644692 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podStartSLOduration=85.644673827 podStartE2EDuration="1m25.644673827s" podCreationTimestamp="2025-11-22 10:40:05 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 10:41:30.644511392 +0000 UTC m=+110.946116689" watchObservedRunningTime="2025-11-22 10:41:30.644673827 +0000 UTC m=+110.946279114" Nov 22 10:41:30 crc kubenswrapper[4926]: I1122 10:41:30.710405 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-dns/node-resolver-wqf9b" podStartSLOduration=91.710379542 podStartE2EDuration="1m31.710379542s" podCreationTimestamp="2025-11-22 10:39:59 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 10:41:30.681544277 +0000 UTC m=+110.983149564" watchObservedRunningTime="2025-11-22 10:41:30.710379542 +0000 UTC m=+111.011984849" Nov 22 10:41:30 crc kubenswrapper[4926]: I1122 10:41:30.714080 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:30 crc kubenswrapper[4926]: I1122 10:41:30.714112 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:30 crc kubenswrapper[4926]: I1122 10:41:30.714121 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 
Nov 22 10:41:30 crc kubenswrapper[4926]: I1122 10:41:30.714133 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 10:41:30 crc kubenswrapper[4926]: I1122 10:41:30.714141 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:30Z","lastTransitionTime":"2025-11-22T10:41:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 10:41:30 crc kubenswrapper[4926]: I1122 10:41:30.764163 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/kube-apiserver-crc" podStartSLOduration=88.764145216 podStartE2EDuration="1m28.764145216s" podCreationTimestamp="2025-11-22 10:40:02 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 10:41:30.764103345 +0000 UTC m=+111.065708642" watchObservedRunningTime="2025-11-22 10:41:30.764145216 +0000 UTC m=+111.065750513"
Nov 22 10:41:30 crc kubenswrapper[4926]: I1122 10:41:30.764612 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/node-ca-4sppr" podStartSLOduration=91.764605369 podStartE2EDuration="1m31.764605369s" podCreationTimestamp="2025-11-22 10:39:59 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 10:41:30.74454046 +0000 UTC m=+111.046145747" watchObservedRunningTime="2025-11-22 10:41:30.764605369 +0000 UTC m=+111.066210676"
Nov 22 10:41:30 crc kubenswrapper[4926]: I1122 10:41:30.793630 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" podStartSLOduration=55.793609698 podStartE2EDuration="55.793609698s" podCreationTimestamp="2025-11-22 10:40:35 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 10:41:30.779565921 +0000 UTC m=+111.081171208" watchObservedRunningTime="2025-11-22 10:41:30.793609698 +0000 UTC m=+111.095214985"
Nov 22 10:41:30 crc kubenswrapper[4926]: I1122 10:41:30.804997 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" podStartSLOduration=90.804978453 podStartE2EDuration="1m30.804978453s" podCreationTimestamp="2025-11-22 10:40:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 10:41:30.804804069 +0000 UTC m=+111.106409366" watchObservedRunningTime="2025-11-22 10:41:30.804978453 +0000 UTC m=+111.106583740"
Nov 22 10:41:30 crc kubenswrapper[4926]: I1122 10:41:30.816656 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 10:41:30 crc kubenswrapper[4926]: I1122 10:41:30.816716 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 10:41:30 crc kubenswrapper[4926]: I1122 10:41:30.816730 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 10:41:30 crc kubenswrapper[4926]: I1122 10:41:30.816744 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 10:41:30 crc kubenswrapper[4926]: I1122 10:41:30.816753 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:30Z","lastTransitionTime":"2025-11-22T10:41:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 10:41:30 crc kubenswrapper[4926]: I1122 10:41:30.822513 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/multus-additional-cni-plugins-sr572" podStartSLOduration=90.822495664 podStartE2EDuration="1m30.822495664s" podCreationTimestamp="2025-11-22 10:40:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 10:41:30.821999221 +0000 UTC m=+111.123604508" watchObservedRunningTime="2025-11-22 10:41:30.822495664 +0000 UTC m=+111.124100961"
Nov 22 10:41:30 crc kubenswrapper[4926]: I1122 10:41:30.867693 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-etcd/etcd-crc" podStartSLOduration=87.867669298 podStartE2EDuration="1m27.867669298s" podCreationTimestamp="2025-11-22 10:40:03 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 10:41:30.86701313 +0000 UTC m=+111.168618417" watchObservedRunningTime="2025-11-22 10:41:30.867669298 +0000 UTC m=+111.169274605"
Nov 22 10:41:30 crc kubenswrapper[4926]: I1122 10:41:30.868126 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" podStartSLOduration=20.86811925 podStartE2EDuration="20.86811925s" podCreationTimestamp="2025-11-22 10:41:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 10:41:30.842347027 +0000 UTC m=+111.143952314" watchObservedRunningTime="2025-11-22 10:41:30.86811925 +0000 UTC m=+111.169724547"
Nov 22 10:41:30 crc kubenswrapper[4926]: I1122 10:41:30.918947 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 10:41:30 crc kubenswrapper[4926]: I1122 10:41:30.919007 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 10:41:30 crc kubenswrapper[4926]: I1122 10:41:30.919024 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 10:41:30 crc kubenswrapper[4926]: I1122 10:41:30.919045 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 10:41:30 crc kubenswrapper[4926]: I1122 10:41:30.919061 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:30Z","lastTransitionTime":"2025-11-22T10:41:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 10:41:30 crc kubenswrapper[4926]: I1122 10:41:30.923967 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-cqsd2" podStartSLOduration=90.923945149 podStartE2EDuration="1m30.923945149s" podCreationTimestamp="2025-11-22 10:40:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 10:41:30.922993734 +0000 UTC m=+111.224599041" watchObservedRunningTime="2025-11-22 10:41:30.923945149 +0000 UTC m=+111.225550436"
Nov 22 10:41:31 crc kubenswrapper[4926]: I1122 10:41:31.022033 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 10:41:31 crc kubenswrapper[4926]: I1122 10:41:31.022072 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 10:41:31 crc kubenswrapper[4926]: I1122 10:41:31.022081 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 10:41:31 crc kubenswrapper[4926]: I1122 10:41:31.022096 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 10:41:31 crc kubenswrapper[4926]: I1122 10:41:31.022106 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:31Z","lastTransitionTime":"2025-11-22T10:41:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 10:41:31 crc kubenswrapper[4926]: I1122 10:41:31.124279 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 10:41:31 crc kubenswrapper[4926]: I1122 10:41:31.124353 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 10:41:31 crc kubenswrapper[4926]: I1122 10:41:31.124366 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 10:41:31 crc kubenswrapper[4926]: I1122 10:41:31.124381 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 10:41:31 crc kubenswrapper[4926]: I1122 10:41:31.124392 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:31Z","lastTransitionTime":"2025-11-22T10:41:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Has your network provider started?"} Nov 22 10:41:31 crc kubenswrapper[4926]: I1122 10:41:31.226639 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:31 crc kubenswrapper[4926]: I1122 10:41:31.226684 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:31 crc kubenswrapper[4926]: I1122 10:41:31.226699 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:31 crc kubenswrapper[4926]: I1122 10:41:31.226719 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:31 crc kubenswrapper[4926]: I1122 10:41:31.226733 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:31Z","lastTransitionTime":"2025-11-22T10:41:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:41:31 crc kubenswrapper[4926]: I1122 10:41:31.329399 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:31 crc kubenswrapper[4926]: I1122 10:41:31.329448 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:31 crc kubenswrapper[4926]: I1122 10:41:31.329550 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:31 crc kubenswrapper[4926]: I1122 10:41:31.329578 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:31 crc kubenswrapper[4926]: I1122 10:41:31.329612 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:31Z","lastTransitionTime":"2025-11-22T10:41:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:41:31 crc kubenswrapper[4926]: I1122 10:41:31.431757 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:31 crc kubenswrapper[4926]: I1122 10:41:31.431805 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:31 crc kubenswrapper[4926]: I1122 10:41:31.431828 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:31 crc kubenswrapper[4926]: I1122 10:41:31.431847 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:31 crc kubenswrapper[4926]: I1122 10:41:31.431862 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:31Z","lastTransitionTime":"2025-11-22T10:41:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:41:31 crc kubenswrapper[4926]: I1122 10:41:31.533709 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:31 crc kubenswrapper[4926]: I1122 10:41:31.533739 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:31 crc kubenswrapper[4926]: I1122 10:41:31.533747 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:31 crc kubenswrapper[4926]: I1122 10:41:31.533760 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:31 crc kubenswrapper[4926]: I1122 10:41:31.533768 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:31Z","lastTransitionTime":"2025-11-22T10:41:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:41:31 crc kubenswrapper[4926]: I1122 10:41:31.581540 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 22 10:41:31 crc kubenswrapper[4926]: E1122 10:41:31.581706 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 22 10:41:31 crc kubenswrapper[4926]: I1122 10:41:31.637724 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:31 crc kubenswrapper[4926]: I1122 10:41:31.637817 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:31 crc kubenswrapper[4926]: I1122 10:41:31.637828 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:31 crc kubenswrapper[4926]: I1122 10:41:31.637845 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:31 crc kubenswrapper[4926]: I1122 10:41:31.637856 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:31Z","lastTransitionTime":"2025-11-22T10:41:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:41:31 crc kubenswrapper[4926]: I1122 10:41:31.740132 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:31 crc kubenswrapper[4926]: I1122 10:41:31.740204 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:31 crc kubenswrapper[4926]: I1122 10:41:31.740222 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:31 crc kubenswrapper[4926]: I1122 10:41:31.740247 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:31 crc kubenswrapper[4926]: I1122 10:41:31.740263 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:31Z","lastTransitionTime":"2025-11-22T10:41:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:41:31 crc kubenswrapper[4926]: I1122 10:41:31.843521 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:31 crc kubenswrapper[4926]: I1122 10:41:31.843572 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:31 crc kubenswrapper[4926]: I1122 10:41:31.843587 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:31 crc kubenswrapper[4926]: I1122 10:41:31.843605 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:31 crc kubenswrapper[4926]: I1122 10:41:31.843617 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:31Z","lastTransitionTime":"2025-11-22T10:41:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:41:31 crc kubenswrapper[4926]: I1122 10:41:31.945986 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:31 crc kubenswrapper[4926]: I1122 10:41:31.946067 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:31 crc kubenswrapper[4926]: I1122 10:41:31.946085 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:31 crc kubenswrapper[4926]: I1122 10:41:31.946112 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:31 crc kubenswrapper[4926]: I1122 10:41:31.946129 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:31Z","lastTransitionTime":"2025-11-22T10:41:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:41:32 crc kubenswrapper[4926]: I1122 10:41:32.048516 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:32 crc kubenswrapper[4926]: I1122 10:41:32.048592 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:32 crc kubenswrapper[4926]: I1122 10:41:32.048618 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:32 crc kubenswrapper[4926]: I1122 10:41:32.048643 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:32 crc kubenswrapper[4926]: I1122 10:41:32.048664 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:32Z","lastTransitionTime":"2025-11-22T10:41:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:41:32 crc kubenswrapper[4926]: I1122 10:41:32.151708 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:32 crc kubenswrapper[4926]: I1122 10:41:32.151745 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:32 crc kubenswrapper[4926]: I1122 10:41:32.151757 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:32 crc kubenswrapper[4926]: I1122 10:41:32.151775 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:32 crc kubenswrapper[4926]: I1122 10:41:32.151787 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:32Z","lastTransitionTime":"2025-11-22T10:41:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:41:32 crc kubenswrapper[4926]: I1122 10:41:32.254593 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:32 crc kubenswrapper[4926]: I1122 10:41:32.254665 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:32 crc kubenswrapper[4926]: I1122 10:41:32.254690 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:32 crc kubenswrapper[4926]: I1122 10:41:32.254719 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:32 crc kubenswrapper[4926]: I1122 10:41:32.254742 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:32Z","lastTransitionTime":"2025-11-22T10:41:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:41:32 crc kubenswrapper[4926]: I1122 10:41:32.358008 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:32 crc kubenswrapper[4926]: I1122 10:41:32.358069 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:32 crc kubenswrapper[4926]: I1122 10:41:32.358101 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:32 crc kubenswrapper[4926]: I1122 10:41:32.358130 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:32 crc kubenswrapper[4926]: I1122 10:41:32.358156 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:32Z","lastTransitionTime":"2025-11-22T10:41:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:41:32 crc kubenswrapper[4926]: I1122 10:41:32.462448 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:32 crc kubenswrapper[4926]: I1122 10:41:32.462532 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:32 crc kubenswrapper[4926]: I1122 10:41:32.462554 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:32 crc kubenswrapper[4926]: I1122 10:41:32.462586 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:32 crc kubenswrapper[4926]: I1122 10:41:32.462611 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:32Z","lastTransitionTime":"2025-11-22T10:41:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:41:32 crc kubenswrapper[4926]: I1122 10:41:32.564929 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:32 crc kubenswrapper[4926]: I1122 10:41:32.564967 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:32 crc kubenswrapper[4926]: I1122 10:41:32.564980 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:32 crc kubenswrapper[4926]: I1122 10:41:32.564995 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:32 crc kubenswrapper[4926]: I1122 10:41:32.565008 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:32Z","lastTransitionTime":"2025-11-22T10:41:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:41:32 crc kubenswrapper[4926]: I1122 10:41:32.582171 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 10:41:32 crc kubenswrapper[4926]: I1122 10:41:32.582228 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-jfbf4" Nov 22 10:41:32 crc kubenswrapper[4926]: I1122 10:41:32.582182 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 22 10:41:32 crc kubenswrapper[4926]: E1122 10:41:32.582360 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 22 10:41:32 crc kubenswrapper[4926]: E1122 10:41:32.582463 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 22 10:41:32 crc kubenswrapper[4926]: E1122 10:41:32.582590 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-jfbf4" podUID="c42b6f47-b1a4-4fee-8681-3b5288370323" Nov 22 10:41:32 crc kubenswrapper[4926]: I1122 10:41:32.667597 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:32 crc kubenswrapper[4926]: I1122 10:41:32.667634 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:32 crc kubenswrapper[4926]: I1122 10:41:32.667645 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:32 crc kubenswrapper[4926]: I1122 10:41:32.667659 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:32 crc kubenswrapper[4926]: I1122 10:41:32.667670 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:32Z","lastTransitionTime":"2025-11-22T10:41:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 10:41:32 crc kubenswrapper[4926]: I1122 10:41:32.770053 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:32 crc kubenswrapper[4926]: I1122 10:41:32.770126 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:32 crc kubenswrapper[4926]: I1122 10:41:32.770146 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:32 crc kubenswrapper[4926]: I1122 10:41:32.770168 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:32 crc kubenswrapper[4926]: I1122 10:41:32.770184 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:32Z","lastTransitionTime":"2025-11-22T10:41:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:41:32 crc kubenswrapper[4926]: I1122 10:41:32.781714 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 10:41:32 crc kubenswrapper[4926]: I1122 10:41:32.781784 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 10:41:32 crc kubenswrapper[4926]: I1122 10:41:32.781807 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 10:41:32 crc kubenswrapper[4926]: I1122 10:41:32.781834 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 10:41:32 crc kubenswrapper[4926]: I1122 10:41:32.781854 4926 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T10:41:32Z","lastTransitionTime":"2025-11-22T10:41:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 10:41:32 crc kubenswrapper[4926]: I1122 10:41:32.845237 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-cluster-version/cluster-version-operator-5c965bbfc6-5b966"] Nov 22 10:41:32 crc kubenswrapper[4926]: I1122 10:41:32.845564 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-5b966" Nov 22 10:41:32 crc kubenswrapper[4926]: I1122 10:41:32.849463 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"default-dockercfg-gxtc4" Nov 22 10:41:32 crc kubenswrapper[4926]: I1122 10:41:32.854662 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"kube-root-ca.crt" Nov 22 10:41:32 crc kubenswrapper[4926]: I1122 10:41:32.854750 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"openshift-service-ca.crt" Nov 22 10:41:32 crc kubenswrapper[4926]: I1122 10:41:32.854925 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"cluster-version-operator-serving-cert" Nov 22 10:41:32 crc kubenswrapper[4926]: I1122 10:41:32.864234 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/48320a8e-ad28-4fde-bf71-e61077f5a06f-kube-api-access\") pod \"cluster-version-operator-5c965bbfc6-5b966\" (UID: \"48320a8e-ad28-4fde-bf71-e61077f5a06f\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-5b966" Nov 22 10:41:32 crc kubenswrapper[4926]: I1122 10:41:32.864297 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/48320a8e-ad28-4fde-bf71-e61077f5a06f-serving-cert\") pod \"cluster-version-operator-5c965bbfc6-5b966\" (UID: \"48320a8e-ad28-4fde-bf71-e61077f5a06f\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-5b966" Nov 22 10:41:32 crc kubenswrapper[4926]: I1122 10:41:32.864367 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-ssl-certs\" (UniqueName: \"kubernetes.io/host-path/48320a8e-ad28-4fde-bf71-e61077f5a06f-etc-ssl-certs\") pod \"cluster-version-operator-5c965bbfc6-5b966\" (UID: \"48320a8e-ad28-4fde-bf71-e61077f5a06f\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-5b966" Nov 22 10:41:32 crc kubenswrapper[4926]: I1122 10:41:32.864398 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/48320a8e-ad28-4fde-bf71-e61077f5a06f-service-ca\") pod \"cluster-version-operator-5c965bbfc6-5b966\" (UID: \"48320a8e-ad28-4fde-bf71-e61077f5a06f\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-5b966" Nov 22 10:41:32 crc kubenswrapper[4926]: I1122 10:41:32.864458 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-cvo-updatepayloads\" (UniqueName: \"kubernetes.io/host-path/48320a8e-ad28-4fde-bf71-e61077f5a06f-etc-cvo-updatepayloads\") pod \"cluster-version-operator-5c965bbfc6-5b966\" (UID: \"48320a8e-ad28-4fde-bf71-e61077f5a06f\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-5b966" Nov 22 10:41:32 crc kubenswrapper[4926]: I1122 10:41:32.965721 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/48320a8e-ad28-4fde-bf71-e61077f5a06f-kube-api-access\") pod \"cluster-version-operator-5c965bbfc6-5b966\" (UID: \"48320a8e-ad28-4fde-bf71-e61077f5a06f\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-5b966" Nov 22 10:41:32 crc 
kubenswrapper[4926]: I1122 10:41:32.965767 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/48320a8e-ad28-4fde-bf71-e61077f5a06f-serving-cert\") pod \"cluster-version-operator-5c965bbfc6-5b966\" (UID: \"48320a8e-ad28-4fde-bf71-e61077f5a06f\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-5b966" Nov 22 10:41:32 crc kubenswrapper[4926]: I1122 10:41:32.965800 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-ssl-certs\" (UniqueName: \"kubernetes.io/host-path/48320a8e-ad28-4fde-bf71-e61077f5a06f-etc-ssl-certs\") pod \"cluster-version-operator-5c965bbfc6-5b966\" (UID: \"48320a8e-ad28-4fde-bf71-e61077f5a06f\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-5b966" Nov 22 10:41:32 crc kubenswrapper[4926]: I1122 10:41:32.965815 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/48320a8e-ad28-4fde-bf71-e61077f5a06f-service-ca\") pod \"cluster-version-operator-5c965bbfc6-5b966\" (UID: \"48320a8e-ad28-4fde-bf71-e61077f5a06f\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-5b966" Nov 22 10:41:32 crc kubenswrapper[4926]: I1122 10:41:32.965859 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-cvo-updatepayloads\" (UniqueName: \"kubernetes.io/host-path/48320a8e-ad28-4fde-bf71-e61077f5a06f-etc-cvo-updatepayloads\") pod \"cluster-version-operator-5c965bbfc6-5b966\" (UID: \"48320a8e-ad28-4fde-bf71-e61077f5a06f\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-5b966" Nov 22 10:41:32 crc kubenswrapper[4926]: I1122 10:41:32.965958 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-cvo-updatepayloads\" (UniqueName: \"kubernetes.io/host-path/48320a8e-ad28-4fde-bf71-e61077f5a06f-etc-cvo-updatepayloads\") pod \"cluster-version-operator-5c965bbfc6-5b966\" (UID: \"48320a8e-ad28-4fde-bf71-e61077f5a06f\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-5b966" Nov 22 10:41:32 crc kubenswrapper[4926]: I1122 10:41:32.965957 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-ssl-certs\" (UniqueName: \"kubernetes.io/host-path/48320a8e-ad28-4fde-bf71-e61077f5a06f-etc-ssl-certs\") pod \"cluster-version-operator-5c965bbfc6-5b966\" (UID: \"48320a8e-ad28-4fde-bf71-e61077f5a06f\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-5b966" Nov 22 10:41:32 crc kubenswrapper[4926]: I1122 10:41:32.967038 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/48320a8e-ad28-4fde-bf71-e61077f5a06f-service-ca\") pod \"cluster-version-operator-5c965bbfc6-5b966\" (UID: \"48320a8e-ad28-4fde-bf71-e61077f5a06f\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-5b966" Nov 22 10:41:32 crc kubenswrapper[4926]: I1122 10:41:32.971865 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/48320a8e-ad28-4fde-bf71-e61077f5a06f-serving-cert\") pod \"cluster-version-operator-5c965bbfc6-5b966\" (UID: \"48320a8e-ad28-4fde-bf71-e61077f5a06f\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-5b966" Nov 22 10:41:32 crc kubenswrapper[4926]: I1122 10:41:32.981519 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" 
(UniqueName: \"kubernetes.io/projected/48320a8e-ad28-4fde-bf71-e61077f5a06f-kube-api-access\") pod \"cluster-version-operator-5c965bbfc6-5b966\" (UID: \"48320a8e-ad28-4fde-bf71-e61077f5a06f\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-5b966" Nov 22 10:41:33 crc kubenswrapper[4926]: I1122 10:41:33.178693 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-5b966" Nov 22 10:41:33 crc kubenswrapper[4926]: W1122 10:41:33.204632 4926 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod48320a8e_ad28_4fde_bf71_e61077f5a06f.slice/crio-0cad35af818640e9856816fb67e2f83b4db09e154d7a829ccaed4c1b3f7d4189 WatchSource:0}: Error finding container 0cad35af818640e9856816fb67e2f83b4db09e154d7a829ccaed4c1b3f7d4189: Status 404 returned error can't find the container with id 0cad35af818640e9856816fb67e2f83b4db09e154d7a829ccaed4c1b3f7d4189 Nov 22 10:41:33 crc kubenswrapper[4926]: I1122 10:41:33.221356 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-5b966" event={"ID":"48320a8e-ad28-4fde-bf71-e61077f5a06f","Type":"ContainerStarted","Data":"0cad35af818640e9856816fb67e2f83b4db09e154d7a829ccaed4c1b3f7d4189"} Nov 22 10:41:33 crc kubenswrapper[4926]: I1122 10:41:33.581930 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 22 10:41:33 crc kubenswrapper[4926]: E1122 10:41:33.582150 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 22 10:41:34 crc kubenswrapper[4926]: I1122 10:41:34.227471 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-5b966" event={"ID":"48320a8e-ad28-4fde-bf71-e61077f5a06f","Type":"ContainerStarted","Data":"780fc85e91fff4c8193b1f8c166d9b3d5d8ae081e7d64c35bb6492c81e4899f3"} Nov 22 10:41:34 crc kubenswrapper[4926]: I1122 10:41:34.243272 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-5b966" podStartSLOduration=94.2432458 podStartE2EDuration="1m34.2432458s" podCreationTimestamp="2025-11-22 10:40:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 10:41:34.242989613 +0000 UTC m=+114.544594930" watchObservedRunningTime="2025-11-22 10:41:34.2432458 +0000 UTC m=+114.544851117" Nov 22 10:41:34 crc kubenswrapper[4926]: I1122 10:41:34.581813 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 10:41:34 crc kubenswrapper[4926]: I1122 10:41:34.581989 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-jfbf4" Nov 22 10:41:34 crc kubenswrapper[4926]: I1122 10:41:34.581813 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 22 10:41:34 crc kubenswrapper[4926]: E1122 10:41:34.582181 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 22 10:41:34 crc kubenswrapper[4926]: E1122 10:41:34.582296 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 22 10:41:34 crc kubenswrapper[4926]: E1122 10:41:34.582451 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-jfbf4" podUID="c42b6f47-b1a4-4fee-8681-3b5288370323" Nov 22 10:41:35 crc kubenswrapper[4926]: I1122 10:41:35.233393 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-c6w2q_36de2843-6491-4c54-b624-c4a3d328c164/kube-multus/1.log" Nov 22 10:41:35 crc kubenswrapper[4926]: I1122 10:41:35.234462 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-c6w2q_36de2843-6491-4c54-b624-c4a3d328c164/kube-multus/0.log" Nov 22 10:41:35 crc kubenswrapper[4926]: I1122 10:41:35.234530 4926 generic.go:334] "Generic (PLEG): container finished" podID="36de2843-6491-4c54-b624-c4a3d328c164" containerID="954a6a1daad88733da68073d0f235894eb5931360545e07bc25ca2cbedf0efae" exitCode=1 Nov 22 10:41:35 crc kubenswrapper[4926]: I1122 10:41:35.234594 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-c6w2q" event={"ID":"36de2843-6491-4c54-b624-c4a3d328c164","Type":"ContainerDied","Data":"954a6a1daad88733da68073d0f235894eb5931360545e07bc25ca2cbedf0efae"} Nov 22 10:41:35 crc kubenswrapper[4926]: I1122 10:41:35.234677 4926 scope.go:117] "RemoveContainer" containerID="13dff3bd18b5adc45fb92f8207a6726011752f430271dabbae9f2e94011f1b08" Nov 22 10:41:35 crc kubenswrapper[4926]: I1122 10:41:35.235314 4926 scope.go:117] "RemoveContainer" containerID="954a6a1daad88733da68073d0f235894eb5931360545e07bc25ca2cbedf0efae" Nov 22 10:41:35 crc kubenswrapper[4926]: E1122 10:41:35.235591 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-multus\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-multus pod=multus-c6w2q_openshift-multus(36de2843-6491-4c54-b624-c4a3d328c164)\"" pod="openshift-multus/multus-c6w2q" podUID="36de2843-6491-4c54-b624-c4a3d328c164" Nov 22 10:41:35 crc kubenswrapper[4926]: I1122 10:41:35.581584 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 22 10:41:35 crc kubenswrapper[4926]: E1122 10:41:35.581757 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 22 10:41:36 crc kubenswrapper[4926]: I1122 10:41:36.241177 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-c6w2q_36de2843-6491-4c54-b624-c4a3d328c164/kube-multus/1.log" Nov 22 10:41:36 crc kubenswrapper[4926]: I1122 10:41:36.580964 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 10:41:36 crc kubenswrapper[4926]: E1122 10:41:36.581136 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 22 10:41:36 crc kubenswrapper[4926]: I1122 10:41:36.581225 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 22 10:41:36 crc kubenswrapper[4926]: E1122 10:41:36.581410 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 22 10:41:36 crc kubenswrapper[4926]: I1122 10:41:36.581233 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-jfbf4" Nov 22 10:41:36 crc kubenswrapper[4926]: E1122 10:41:36.581627 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-jfbf4" podUID="c42b6f47-b1a4-4fee-8681-3b5288370323" Nov 22 10:41:37 crc kubenswrapper[4926]: I1122 10:41:37.580863 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 22 10:41:37 crc kubenswrapper[4926]: E1122 10:41:37.581644 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 22 10:41:38 crc kubenswrapper[4926]: I1122 10:41:38.581027 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 22 10:41:38 crc kubenswrapper[4926]: I1122 10:41:38.581091 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-jfbf4" Nov 22 10:41:38 crc kubenswrapper[4926]: E1122 10:41:38.581171 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 22 10:41:38 crc kubenswrapper[4926]: E1122 10:41:38.581261 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-jfbf4" podUID="c42b6f47-b1a4-4fee-8681-3b5288370323" Nov 22 10:41:38 crc kubenswrapper[4926]: I1122 10:41:38.581816 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 10:41:38 crc kubenswrapper[4926]: E1122 10:41:38.582011 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 22 10:41:39 crc kubenswrapper[4926]: I1122 10:41:39.581448 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 22 10:41:39 crc kubenswrapper[4926]: E1122 10:41:39.581635 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 22 10:41:39 crc kubenswrapper[4926]: I1122 10:41:39.582648 4926 scope.go:117] "RemoveContainer" containerID="b2e1039227d0efdfdc18a3ec2d9bac78d747c80c3ae8c0a78b3cef27bae4245f" Nov 22 10:41:40 crc kubenswrapper[4926]: I1122 10:41:40.256015 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-z69nr_25bc94bb-a5d1-431c-9847-2f6a02997e25/ovnkube-controller/3.log" Nov 22 10:41:40 crc kubenswrapper[4926]: I1122 10:41:40.258151 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-z69nr" event={"ID":"25bc94bb-a5d1-431c-9847-2f6a02997e25","Type":"ContainerStarted","Data":"4271454bde9cb7e8d020b6afb988e02927d482eb62ccfdad1182a4e8e3c46761"} Nov 22 10:41:40 crc kubenswrapper[4926]: I1122 10:41:40.258516 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-z69nr" Nov 22 10:41:40 crc kubenswrapper[4926]: I1122 10:41:40.288188 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ovn-kubernetes/ovnkube-node-z69nr" podStartSLOduration=100.288173454 podStartE2EDuration="1m40.288173454s" podCreationTimestamp="2025-11-22 10:40:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 10:41:40.287503706 +0000 UTC m=+120.589109063" watchObservedRunningTime="2025-11-22 10:41:40.288173454 +0000 UTC m=+120.589778741" Nov 22 10:41:40 crc kubenswrapper[4926]: I1122 10:41:40.462333 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/network-metrics-daemon-jfbf4"] Nov 22 10:41:40 crc kubenswrapper[4926]: I1122 10:41:40.462437 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-jfbf4" Nov 22 10:41:40 crc kubenswrapper[4926]: E1122 10:41:40.462536 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-jfbf4" podUID="c42b6f47-b1a4-4fee-8681-3b5288370323" Nov 22 10:41:40 crc kubenswrapper[4926]: I1122 10:41:40.581171 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 22 10:41:40 crc kubenswrapper[4926]: I1122 10:41:40.581213 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 10:41:40 crc kubenswrapper[4926]: E1122 10:41:40.582187 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 22 10:41:40 crc kubenswrapper[4926]: E1122 10:41:40.582330 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 22 10:41:40 crc kubenswrapper[4926]: E1122 10:41:40.616660 4926 kubelet_node_status.go:497] "Node not becoming ready in time after startup" Nov 22 10:41:40 crc kubenswrapper[4926]: E1122 10:41:40.681363 4926 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Nov 22 10:41:41 crc kubenswrapper[4926]: I1122 10:41:41.581618 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 22 10:41:41 crc kubenswrapper[4926]: E1122 10:41:41.581803 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 22 10:41:42 crc kubenswrapper[4926]: I1122 10:41:42.581993 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 10:41:42 crc kubenswrapper[4926]: I1122 10:41:42.582076 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 22 10:41:42 crc kubenswrapper[4926]: E1122 10:41:42.582180 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 22 10:41:42 crc kubenswrapper[4926]: I1122 10:41:42.582230 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-jfbf4" Nov 22 10:41:42 crc kubenswrapper[4926]: E1122 10:41:42.582305 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 22 10:41:42 crc kubenswrapper[4926]: E1122 10:41:42.582492 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-jfbf4" podUID="c42b6f47-b1a4-4fee-8681-3b5288370323" Nov 22 10:41:43 crc kubenswrapper[4926]: I1122 10:41:43.581028 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 22 10:41:43 crc kubenswrapper[4926]: E1122 10:41:43.581209 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 22 10:41:44 crc kubenswrapper[4926]: I1122 10:41:44.581424 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-jfbf4" Nov 22 10:41:44 crc kubenswrapper[4926]: I1122 10:41:44.581583 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 10:41:44 crc kubenswrapper[4926]: E1122 10:41:44.581682 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-jfbf4" podUID="c42b6f47-b1a4-4fee-8681-3b5288370323" Nov 22 10:41:44 crc kubenswrapper[4926]: I1122 10:41:44.581588 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 22 10:41:44 crc kubenswrapper[4926]: E1122 10:41:44.581801 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 22 10:41:44 crc kubenswrapper[4926]: E1122 10:41:44.581970 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 22 10:41:45 crc kubenswrapper[4926]: I1122 10:41:45.581080 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 22 10:41:45 crc kubenswrapper[4926]: E1122 10:41:45.581256 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 22 10:41:45 crc kubenswrapper[4926]: E1122 10:41:45.682609 4926 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Nov 22 10:41:46 crc kubenswrapper[4926]: I1122 10:41:46.581483 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-jfbf4" Nov 22 10:41:46 crc kubenswrapper[4926]: I1122 10:41:46.581524 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 10:41:46 crc kubenswrapper[4926]: E1122 10:41:46.582229 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-jfbf4" podUID="c42b6f47-b1a4-4fee-8681-3b5288370323" Nov 22 10:41:46 crc kubenswrapper[4926]: E1122 10:41:46.582364 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 22 10:41:46 crc kubenswrapper[4926]: I1122 10:41:46.581691 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 22 10:41:46 crc kubenswrapper[4926]: E1122 10:41:46.582501 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 22 10:41:47 crc kubenswrapper[4926]: I1122 10:41:47.581546 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 22 10:41:47 crc kubenswrapper[4926]: E1122 10:41:47.581737 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 22 10:41:48 crc kubenswrapper[4926]: I1122 10:41:48.580994 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 22 10:41:48 crc kubenswrapper[4926]: I1122 10:41:48.581013 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-jfbf4" Nov 22 10:41:48 crc kubenswrapper[4926]: E1122 10:41:48.581225 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 22 10:41:48 crc kubenswrapper[4926]: I1122 10:41:48.581329 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 10:41:48 crc kubenswrapper[4926]: E1122 10:41:48.581560 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-jfbf4" podUID="c42b6f47-b1a4-4fee-8681-3b5288370323" Nov 22 10:41:48 crc kubenswrapper[4926]: E1122 10:41:48.581629 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 22 10:41:49 crc kubenswrapper[4926]: I1122 10:41:49.581425 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 22 10:41:49 crc kubenswrapper[4926]: E1122 10:41:49.581878 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 22 10:41:49 crc kubenswrapper[4926]: I1122 10:41:49.581977 4926 scope.go:117] "RemoveContainer" containerID="954a6a1daad88733da68073d0f235894eb5931360545e07bc25ca2cbedf0efae" Nov 22 10:41:50 crc kubenswrapper[4926]: I1122 10:41:50.302568 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-c6w2q_36de2843-6491-4c54-b624-c4a3d328c164/kube-multus/1.log" Nov 22 10:41:50 crc kubenswrapper[4926]: I1122 10:41:50.302665 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-c6w2q" event={"ID":"36de2843-6491-4c54-b624-c4a3d328c164","Type":"ContainerStarted","Data":"714201c7b206d73e8fd6e2f4d27cba426c1666275e34a169d3f59828f19bba74"} Nov 22 10:41:50 crc kubenswrapper[4926]: I1122 10:41:50.581700 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-jfbf4" Nov 22 10:41:50 crc kubenswrapper[4926]: I1122 10:41:50.581738 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 10:41:50 crc kubenswrapper[4926]: E1122 10:41:50.581959 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-jfbf4" podUID="c42b6f47-b1a4-4fee-8681-3b5288370323" Nov 22 10:41:50 crc kubenswrapper[4926]: I1122 10:41:50.582021 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 22 10:41:50 crc kubenswrapper[4926]: E1122 10:41:50.583971 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 22 10:41:50 crc kubenswrapper[4926]: E1122 10:41:50.584102 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 22 10:41:50 crc kubenswrapper[4926]: E1122 10:41:50.683713 4926 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Nov 22 10:41:51 crc kubenswrapper[4926]: I1122 10:41:51.581778 4926 util.go:30] "No sandbox for pod can be found. 
Nov 22 10:41:51 crc kubenswrapper[4926]: I1122 10:41:51.581778 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 22 10:41:51 crc kubenswrapper[4926]: E1122 10:41:51.582235 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 22 10:41:52 crc kubenswrapper[4926]: I1122 10:41:52.581714 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 22 10:41:52 crc kubenswrapper[4926]: E1122 10:41:52.581951 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 22 10:41:52 crc kubenswrapper[4926]: I1122 10:41:52.581992 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 22 10:41:52 crc kubenswrapper[4926]: I1122 10:41:52.582021 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-jfbf4"
Nov 22 10:41:52 crc kubenswrapper[4926]: E1122 10:41:52.582486 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 22 10:41:52 crc kubenswrapper[4926]: E1122 10:41:52.582634 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-jfbf4" podUID="c42b6f47-b1a4-4fee-8681-3b5288370323"
Nov 22 10:41:53 crc kubenswrapper[4926]: I1122 10:41:53.581851 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 22 10:41:53 crc kubenswrapper[4926]: E1122 10:41:53.582141 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 22 10:41:54 crc kubenswrapper[4926]: I1122 10:41:54.581529 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 22 10:41:54 crc kubenswrapper[4926]: I1122 10:41:54.581602 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 22 10:41:54 crc kubenswrapper[4926]: I1122 10:41:54.581673 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-jfbf4"
Nov 22 10:41:54 crc kubenswrapper[4926]: E1122 10:41:54.581735 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 22 10:41:54 crc kubenswrapper[4926]: E1122 10:41:54.581883 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-jfbf4" podUID="c42b6f47-b1a4-4fee-8681-3b5288370323"
Nov 22 10:41:54 crc kubenswrapper[4926]: E1122 10:41:54.582090 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 22 10:41:55 crc kubenswrapper[4926]: I1122 10:41:55.585432 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 22 10:41:55 crc kubenswrapper[4926]: E1122 10:41:55.585700 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 22 10:41:56 crc kubenswrapper[4926]: I1122 10:41:56.581731 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 22 10:41:56 crc kubenswrapper[4926]: I1122 10:41:56.582223 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-jfbf4"
Nov 22 10:41:56 crc kubenswrapper[4926]: I1122 10:41:56.582483 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 22 10:41:56 crc kubenswrapper[4926]: I1122 10:41:56.584929 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"kube-root-ca.crt"
Nov 22 10:41:56 crc kubenswrapper[4926]: I1122 10:41:56.586596 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-sa-dockercfg-d427c"
Nov 22 10:41:56 crc kubenswrapper[4926]: I1122 10:41:56.586653 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-console"/"networking-console-plugin"
Nov 22 10:41:56 crc kubenswrapper[4926]: I1122 10:41:56.586744 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"openshift-service-ca.crt"
Nov 22 10:41:56 crc kubenswrapper[4926]: I1122 10:41:56.586775 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-secret"
Nov 22 10:41:56 crc kubenswrapper[4926]: I1122 10:41:56.587425 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-console"/"networking-console-plugin-cert"
Nov 22 10:41:57 crc kubenswrapper[4926]: I1122 10:41:57.581317 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 22 10:42:02 crc kubenswrapper[4926]: I1122 10:42:02.526459 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-z69nr"
Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.260379 4926 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeReady"
Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.298476 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-apiserver/apiserver-76f77b778f-d4vh5"]
Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.299401 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-76f77b778f-d4vh5"
Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.302632 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-pdkqk"]
Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.303168 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-pdkqk"
Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.303308 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-cluster-machine-approver/machine-approver-56656f9798-ztdwg"]
Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.303772 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-ztdwg"
Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.305612 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-oauth-apiserver/apiserver-7bbb656c7d-7zp2q"]
Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.306663 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-7zp2q"
Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.306737 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-authentication-operator/authentication-operator-69f744f599-wvt6w"]
Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.307529 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-69f744f599-wvt6w"
Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.307860 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-944rj"]
Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.308497 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-944rj"
Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.308853 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"image-import-ca"
Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.308940 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"etcd-serving-ca"
Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.308943 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-g6m6g"]
Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.309451 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-g6m6g"
Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.310423 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/downloads-7954f5f757-8l5sv"]
Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.310927 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-7954f5f757-8l5sv"
Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.313799 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"machine-approver-config"
Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.314077 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"encryption-config-1"
Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.314156 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"openshift-apiserver-sa-dockercfg-djjff"
Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.314369 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"etcd-client"
Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.314653 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"serving-cert"
Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.314701 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-root-ca.crt"
Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.314818 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"serving-cert"
Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.314817 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca"
Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.315010 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert"
Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.314615 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-rbac-proxy"
Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.315250 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"openshift-service-ca.crt"
Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.315360 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"openshift-service-ca.crt"
Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.315254 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-tls"
Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.315593 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console-operator/console-operator-58897d9998-zk2x8"]
Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.316015 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"audit-1"
Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.316324 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"etcd-serving-ca"
Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.316433 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"oauth-apiserver-sa-dockercfg-6r2bq"
Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.316532 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"encryption-config-1"
Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.316559 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"config"
Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.316403 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"etcd-client"
Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.316874 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-58897d9998-zk2x8"
Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.319520 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-config-operator/openshift-config-operator-7777fb866f-954dx"]
Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.320173 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-7777fb866f-954dx"
Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.322674 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"audit-1"
Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.323346 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt"
Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.323751 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-sa-dockercfg-nl2j4"
Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.327245 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config"
Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.329963 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/console-f9d7485db-nsj2w"]
Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.330514 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-f9d7485db-nsj2w"
Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.331424 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"trusted-ca-bundle"
Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.331531 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"service-ca-bundle"
Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.331635 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"authentication-operator-config"
Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.331746 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"authentication-operator-dockercfg-mz9bj"
Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.332592 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-c56h5"]
Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.337254 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt"
Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.337440 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-provider-selection"
Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.337614 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-session"
Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.337665 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2"
Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.337706 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"openshift-service-ca.crt"
Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.337882 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"openshift-service-ca.crt"
Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.337989 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-cliconfig"
Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.338084 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"audit"
Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.338248 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"kube-root-ca.crt"
Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.338450 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-idp-0-file-data"
Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.338810 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-service-ca"
Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.338835 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"kube-root-ca.crt"
Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.338908 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"openshift-service-ca.crt"
Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.338977 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"console-operator-config"
Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.339042 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"openshift-service-ca.crt"
Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.339106 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"openshift-service-ca.crt"
Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.339157 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-c56h5"
Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.339210 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"default-dockercfg-chnjx"
Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.339343 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"config-operator-serving-cert"
Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.339429 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"oauth-serving-cert"
Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.339526 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"oauth-openshift-dockercfg-znhcc"
Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.339647 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"kube-root-ca.crt"
Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.339706 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"openshift-config-operator-dockercfg-7pc5z"
Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.339734 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"serving-cert"
Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.339849 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"console-operator-dockercfg-4xjcr"
Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.339943 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"serving-cert"
Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.339951 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"openshift-service-ca.crt"
Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.340185 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"kube-root-ca.crt"
Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.339578 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"console-config"
Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.344107 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-serving-cert"
Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.344584 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"openshift-service-ca.crt"
Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.344837 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-error"
Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.345305 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"kube-root-ca.crt"
Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.346158 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"cluster-samples-operator-dockercfg-xpp9w"
Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.347132 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"samples-operator-tls"
Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.349149 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"kube-root-ca.crt"
Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.349555 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"kube-root-ca.crt"
Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.352082 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-router-certs"
Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.352332 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"kube-root-ca.crt"
Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.352536 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-oauth-config"
Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.352605 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"service-ca"
Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.352539 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-dockercfg-f62pw"
Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.375190 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-serving-cert"
Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.375613 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-api/machine-api-operator-5694c8668f-jkqsx"]
Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.376386 4926 util.go:30] "No sandbox for pod can be found.
Need to start a new one" pod="openshift-machine-api/machine-api-operator-5694c8668f-jkqsx" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.377125 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.377637 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.377978 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.378399 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-dns-operator/dns-operator-744455d44c-jb7x7"] Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.384148 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"trusted-ca-bundle" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.385089 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.385206 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.385319 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.386882 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"trusted-ca" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.391354 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-27btn"] Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.392262 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"trusted-ca-bundle" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.392739 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-744455d44c-jb7x7" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.392809 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-dl4z4"] Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.393185 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-pcmsx"] Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.393331 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-27btn" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.393416 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-dl4z4" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.393528 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-controller-84d6567774-t4dgw"] Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.393996 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-pcmsx" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.394014 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-brhnh"] Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.394416 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-brhnh" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.395229 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-trusted-ca-bundle" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.394505 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-t4dgw" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.394673 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-rkbd7"] Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.395948 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"machine-api-operator-images" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.396129 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-root-ca.crt" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.396264 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-dockercfg-mfbb7" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.396345 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"openshift-service-ca.crt" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.396472 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-tls" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.396576 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-rkbd7" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.396611 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-rbac-proxy" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.399166 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/multus-admission-controller-857f4d67dd-js7td"] Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.399718 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-storage-version-migrator/migrator-59844c95c7-qkq57"] Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.400129 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-qkq57" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.400316 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/multus-admission-controller-857f4d67dd-js7td" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.401714 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-dockercfg-x57mr" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.401893 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-serving-cert" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.401970 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-serving-cert" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.402477 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/c14868fd-4ccf-4779-81e8-dc3b30393f1f-available-featuregates\") pod \"openshift-config-operator-7777fb866f-954dx\" (UID: \"c14868fd-4ccf-4779-81e8-dc3b30393f1f\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-954dx" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.402513 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/ae977eb4-8273-4dab-9e39-80c36ccd63e2-service-ca\") pod \"console-f9d7485db-nsj2w\" (UID: \"ae977eb4-8273-4dab-9e39-80c36ccd63e2\") " pod="openshift-console/console-f9d7485db-nsj2w" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.402538 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c44b98ae-cd26-48e0-9c72-725fd64d22f9-trusted-ca-bundle\") pod \"apiserver-76f77b778f-d4vh5\" (UID: \"c44b98ae-cd26-48e0-9c72-725fd64d22f9\") " pod="openshift-apiserver/apiserver-76f77b778f-d4vh5" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.402576 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/831547cf-3937-4d20-83b8-9570e309f0b3-audit-policies\") pod \"apiserver-7bbb656c7d-7zp2q\" (UID: \"831547cf-3937-4d20-83b8-9570e309f0b3\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-7zp2q" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.402597 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/62b979b0-adbc-4c29-a08d-55bb2d07fc6c-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-558db77b4-g6m6g\" (UID: \"62b979b0-adbc-4c29-a08d-55bb2d07fc6c\") " pod="openshift-authentication/oauth-openshift-558db77b4-g6m6g" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.402624 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-s2mfz\" (UniqueName: \"kubernetes.io/projected/0dc0778b-7907-42c7-a656-22c517a50c3a-kube-api-access-s2mfz\") pod \"authentication-operator-69f744f599-wvt6w\" (UID: \"0dc0778b-7907-42c7-a656-22c517a50c3a\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-wvt6w" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.402650 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"kube-api-access-66l87\" (UniqueName: \"kubernetes.io/projected/62b979b0-adbc-4c29-a08d-55bb2d07fc6c-kube-api-access-66l87\") pod \"oauth-openshift-558db77b4-g6m6g\" (UID: \"62b979b0-adbc-4c29-a08d-55bb2d07fc6c\") " pod="openshift-authentication/oauth-openshift-558db77b4-g6m6g" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.402674 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w7wqx\" (UniqueName: \"kubernetes.io/projected/6e0f47bd-848d-4619-a2f9-eb503d04e2e0-kube-api-access-w7wqx\") pod \"downloads-7954f5f757-8l5sv\" (UID: \"6e0f47bd-848d-4619-a2f9-eb503d04e2e0\") " pod="openshift-console/downloads-7954f5f757-8l5sv" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.402693 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/c44b98ae-cd26-48e0-9c72-725fd64d22f9-node-pullsecrets\") pod \"apiserver-76f77b778f-d4vh5\" (UID: \"c44b98ae-cd26-48e0-9c72-725fd64d22f9\") " pod="openshift-apiserver/apiserver-76f77b778f-d4vh5" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.402714 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/dac98899-4c89-48b4-bf2d-0aee1eb95eff-samples-operator-tls\") pod \"cluster-samples-operator-665b6dd947-944rj\" (UID: \"dac98899-4c89-48b4-bf2d-0aee1eb95eff\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-944rj" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.402736 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wk7sn\" (UniqueName: \"kubernetes.io/projected/dac98899-4c89-48b4-bf2d-0aee1eb95eff-kube-api-access-wk7sn\") pod \"cluster-samples-operator-665b6dd947-944rj\" (UID: \"dac98899-4c89-48b4-bf2d-0aee1eb95eff\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-944rj" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.402754 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2d43d07f-3f14-4be9-9801-d40bda91eb2e-config\") pod \"console-operator-58897d9998-zk2x8\" (UID: \"2d43d07f-3f14-4be9-9801-d40bda91eb2e\") " pod="openshift-console-operator/console-operator-58897d9998-zk2x8" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.402776 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/c44b98ae-cd26-48e0-9c72-725fd64d22f9-audit-dir\") pod \"apiserver-76f77b778f-d4vh5\" (UID: \"c44b98ae-cd26-48e0-9c72-725fd64d22f9\") " pod="openshift-apiserver/apiserver-76f77b778f-d4vh5" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.402793 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/831547cf-3937-4d20-83b8-9570e309f0b3-etcd-client\") pod \"apiserver-7bbb656c7d-7zp2q\" (UID: \"831547cf-3937-4d20-83b8-9570e309f0b3\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-7zp2q" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.402811 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: 
\"kubernetes.io/host-path/831547cf-3937-4d20-83b8-9570e309f0b3-audit-dir\") pod \"apiserver-7bbb656c7d-7zp2q\" (UID: \"831547cf-3937-4d20-83b8-9570e309f0b3\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-7zp2q" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.402831 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/62b979b0-adbc-4c29-a08d-55bb2d07fc6c-v4-0-config-system-session\") pod \"oauth-openshift-558db77b4-g6m6g\" (UID: \"62b979b0-adbc-4c29-a08d-55bb2d07fc6c\") " pod="openshift-authentication/oauth-openshift-558db77b4-g6m6g" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.402865 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/fbac19dc-113c-44e6-8744-445e62ea540d-config\") pod \"controller-manager-879f6c89f-c56h5\" (UID: \"fbac19dc-113c-44e6-8744-445e62ea540d\") " pod="openshift-controller-manager/controller-manager-879f6c89f-c56h5" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.402905 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/fbac19dc-113c-44e6-8744-445e62ea540d-serving-cert\") pod \"controller-manager-879f6c89f-c56h5\" (UID: \"fbac19dc-113c-44e6-8744-445e62ea540d\") " pod="openshift-controller-manager/controller-manager-879f6c89f-c56h5" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.402926 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5g4xh\" (UniqueName: \"kubernetes.io/projected/c44b98ae-cd26-48e0-9c72-725fd64d22f9-kube-api-access-5g4xh\") pod \"apiserver-76f77b778f-d4vh5\" (UID: \"c44b98ae-cd26-48e0-9c72-725fd64d22f9\") " pod="openshift-apiserver/apiserver-76f77b778f-d4vh5" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.402948 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/62b979b0-adbc-4c29-a08d-55bb2d07fc6c-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-558db77b4-g6m6g\" (UID: \"62b979b0-adbc-4c29-a08d-55bb2d07fc6c\") " pod="openshift-authentication/oauth-openshift-558db77b4-g6m6g" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.402982 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/ce9a30da-be0f-49f1-8e0b-40bb1fba706a-client-ca\") pod \"route-controller-manager-6576b87f9c-pdkqk\" (UID: \"ce9a30da-be0f-49f1-8e0b-40bb1fba706a\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-pdkqk" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.403015 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-c8wkh\" (UniqueName: \"kubernetes.io/projected/f209060d-bbc4-4f07-82e2-1e8b212c1f56-kube-api-access-c8wkh\") pod \"machine-approver-56656f9798-ztdwg\" (UID: \"f209060d-bbc4-4f07-82e2-1e8b212c1f56\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-ztdwg" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.403035 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: 
\"kubernetes.io/secret/0dc0778b-7907-42c7-a656-22c517a50c3a-serving-cert\") pod \"authentication-operator-69f744f599-wvt6w\" (UID: \"0dc0778b-7907-42c7-a656-22c517a50c3a\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-wvt6w" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.403055 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/0dc0778b-7907-42c7-a656-22c517a50c3a-service-ca-bundle\") pod \"authentication-operator-69f744f599-wvt6w\" (UID: \"0dc0778b-7907-42c7-a656-22c517a50c3a\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-wvt6w" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.403075 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/831547cf-3937-4d20-83b8-9570e309f0b3-etcd-serving-ca\") pod \"apiserver-7bbb656c7d-7zp2q\" (UID: \"831547cf-3937-4d20-83b8-9570e309f0b3\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-7zp2q" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.403097 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/ae977eb4-8273-4dab-9e39-80c36ccd63e2-console-serving-cert\") pod \"console-f9d7485db-nsj2w\" (UID: \"ae977eb4-8273-4dab-9e39-80c36ccd63e2\") " pod="openshift-console/console-f9d7485db-nsj2w" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.403118 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/c44b98ae-cd26-48e0-9c72-725fd64d22f9-image-import-ca\") pod \"apiserver-76f77b778f-d4vh5\" (UID: \"c44b98ae-cd26-48e0-9c72-725fd64d22f9\") " pod="openshift-apiserver/apiserver-76f77b778f-d4vh5" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.403138 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/62b979b0-adbc-4c29-a08d-55bb2d07fc6c-audit-policies\") pod \"oauth-openshift-558db77b4-g6m6g\" (UID: \"62b979b0-adbc-4c29-a08d-55bb2d07fc6c\") " pod="openshift-authentication/oauth-openshift-558db77b4-g6m6g" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.403160 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ce9a30da-be0f-49f1-8e0b-40bb1fba706a-config\") pod \"route-controller-manager-6576b87f9c-pdkqk\" (UID: \"ce9a30da-be0f-49f1-8e0b-40bb1fba706a\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-pdkqk" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.403184 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/62b979b0-adbc-4c29-a08d-55bb2d07fc6c-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-558db77b4-g6m6g\" (UID: \"62b979b0-adbc-4c29-a08d-55bb2d07fc6c\") " pod="openshift-authentication/oauth-openshift-558db77b4-g6m6g" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.403210 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-njc5s\" (UniqueName: 
\"kubernetes.io/projected/2d43d07f-3f14-4be9-9801-d40bda91eb2e-kube-api-access-njc5s\") pod \"console-operator-58897d9998-zk2x8\" (UID: \"2d43d07f-3f14-4be9-9801-d40bda91eb2e\") " pod="openshift-console-operator/console-operator-58897d9998-zk2x8" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.403232 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c14868fd-4ccf-4779-81e8-dc3b30393f1f-serving-cert\") pod \"openshift-config-operator-7777fb866f-954dx\" (UID: \"c14868fd-4ccf-4779-81e8-dc3b30393f1f\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-954dx" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.403251 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/c44b98ae-cd26-48e0-9c72-725fd64d22f9-audit\") pod \"apiserver-76f77b778f-d4vh5\" (UID: \"c44b98ae-cd26-48e0-9c72-725fd64d22f9\") " pod="openshift-apiserver/apiserver-76f77b778f-d4vh5" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.403271 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/62b979b0-adbc-4c29-a08d-55bb2d07fc6c-v4-0-config-user-template-login\") pod \"oauth-openshift-558db77b4-g6m6g\" (UID: \"62b979b0-adbc-4c29-a08d-55bb2d07fc6c\") " pod="openshift-authentication/oauth-openshift-558db77b4-g6m6g" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.403289 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ce9a30da-be0f-49f1-8e0b-40bb1fba706a-serving-cert\") pod \"route-controller-manager-6576b87f9c-pdkqk\" (UID: \"ce9a30da-be0f-49f1-8e0b-40bb1fba706a\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-pdkqk" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.403312 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-j696v\" (UniqueName: \"kubernetes.io/projected/fbac19dc-113c-44e6-8744-445e62ea540d-kube-api-access-j696v\") pod \"controller-manager-879f6c89f-c56h5\" (UID: \"fbac19dc-113c-44e6-8744-445e62ea540d\") " pod="openshift-controller-manager/controller-manager-879f6c89f-c56h5" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.403331 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/c44b98ae-cd26-48e0-9c72-725fd64d22f9-etcd-client\") pod \"apiserver-76f77b778f-d4vh5\" (UID: \"c44b98ae-cd26-48e0-9c72-725fd64d22f9\") " pod="openshift-apiserver/apiserver-76f77b778f-d4vh5" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.403353 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/831547cf-3937-4d20-83b8-9570e309f0b3-serving-cert\") pod \"apiserver-7bbb656c7d-7zp2q\" (UID: \"831547cf-3937-4d20-83b8-9570e309f0b3\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-7zp2q" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.403374 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hd54r\" (UniqueName: 
\"kubernetes.io/projected/831547cf-3937-4d20-83b8-9570e309f0b3-kube-api-access-hd54r\") pod \"apiserver-7bbb656c7d-7zp2q\" (UID: \"831547cf-3937-4d20-83b8-9570e309f0b3\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-7zp2q" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.403396 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/0dc0778b-7907-42c7-a656-22c517a50c3a-trusted-ca-bundle\") pod \"authentication-operator-69f744f599-wvt6w\" (UID: \"0dc0778b-7907-42c7-a656-22c517a50c3a\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-wvt6w" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.403430 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/c44b98ae-cd26-48e0-9c72-725fd64d22f9-encryption-config\") pod \"apiserver-76f77b778f-d4vh5\" (UID: \"c44b98ae-cd26-48e0-9c72-725fd64d22f9\") " pod="openshift-apiserver/apiserver-76f77b778f-d4vh5" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.403443 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-service-ca.crt" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.403456 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c44b98ae-cd26-48e0-9c72-725fd64d22f9-config\") pod \"apiserver-76f77b778f-d4vh5\" (UID: \"c44b98ae-cd26-48e0-9c72-725fd64d22f9\") " pod="openshift-apiserver/apiserver-76f77b778f-d4vh5" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.403712 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/62b979b0-adbc-4c29-a08d-55bb2d07fc6c-v4-0-config-system-router-certs\") pod \"oauth-openshift-558db77b4-g6m6g\" (UID: \"62b979b0-adbc-4c29-a08d-55bb2d07fc6c\") " pod="openshift-authentication/oauth-openshift-558db77b4-g6m6g" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.403737 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ch7n5\" (UniqueName: \"kubernetes.io/projected/ce9a30da-be0f-49f1-8e0b-40bb1fba706a-kube-api-access-ch7n5\") pod \"route-controller-manager-6576b87f9c-pdkqk\" (UID: \"ce9a30da-be0f-49f1-8e0b-40bb1fba706a\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-pdkqk" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.403769 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/2d43d07f-3f14-4be9-9801-d40bda91eb2e-trusted-ca\") pod \"console-operator-58897d9998-zk2x8\" (UID: \"2d43d07f-3f14-4be9-9801-d40bda91eb2e\") " pod="openshift-console-operator/console-operator-58897d9998-zk2x8" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.403793 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/fbac19dc-113c-44e6-8744-445e62ea540d-client-ca\") pod \"controller-manager-879f6c89f-c56h5\" (UID: \"fbac19dc-113c-44e6-8744-445e62ea540d\") " pod="openshift-controller-manager/controller-manager-879f6c89f-c56h5" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 
10:42:03.403798 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"kube-root-ca.crt" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.403809 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/ae977eb4-8273-4dab-9e39-80c36ccd63e2-console-config\") pod \"console-f9d7485db-nsj2w\" (UID: \"ae977eb4-8273-4dab-9e39-80c36ccd63e2\") " pod="openshift-console/console-f9d7485db-nsj2w" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.403844 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/f209060d-bbc4-4f07-82e2-1e8b212c1f56-machine-approver-tls\") pod \"machine-approver-56656f9798-ztdwg\" (UID: \"f209060d-bbc4-4f07-82e2-1e8b212c1f56\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-ztdwg" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.403861 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f209060d-bbc4-4f07-82e2-1e8b212c1f56-config\") pod \"machine-approver-56656f9798-ztdwg\" (UID: \"f209060d-bbc4-4f07-82e2-1e8b212c1f56\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-ztdwg" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.403917 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/62b979b0-adbc-4c29-a08d-55bb2d07fc6c-v4-0-config-system-service-ca\") pod \"oauth-openshift-558db77b4-g6m6g\" (UID: \"62b979b0-adbc-4c29-a08d-55bb2d07fc6c\") " pod="openshift-authentication/oauth-openshift-558db77b4-g6m6g" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.403952 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/62b979b0-adbc-4c29-a08d-55bb2d07fc6c-v4-0-config-system-cliconfig\") pod \"oauth-openshift-558db77b4-g6m6g\" (UID: \"62b979b0-adbc-4c29-a08d-55bb2d07fc6c\") " pod="openshift-authentication/oauth-openshift-558db77b4-g6m6g" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.403970 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/62b979b0-adbc-4c29-a08d-55bb2d07fc6c-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-558db77b4-g6m6g\" (UID: \"62b979b0-adbc-4c29-a08d-55bb2d07fc6c\") " pod="openshift-authentication/oauth-openshift-558db77b4-g6m6g" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.403984 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0dc0778b-7907-42c7-a656-22c517a50c3a-config\") pod \"authentication-operator-69f744f599-wvt6w\" (UID: \"0dc0778b-7907-42c7-a656-22c517a50c3a\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-wvt6w" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.404001 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/831547cf-3937-4d20-83b8-9570e309f0b3-trusted-ca-bundle\") pod 
\"apiserver-7bbb656c7d-7zp2q\" (UID: \"831547cf-3937-4d20-83b8-9570e309f0b3\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-7zp2q" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.404014 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/831547cf-3937-4d20-83b8-9570e309f0b3-encryption-config\") pod \"apiserver-7bbb656c7d-7zp2q\" (UID: \"831547cf-3937-4d20-83b8-9570e309f0b3\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-7zp2q" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.404029 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/62b979b0-adbc-4c29-a08d-55bb2d07fc6c-v4-0-config-system-serving-cert\") pod \"oauth-openshift-558db77b4-g6m6g\" (UID: \"62b979b0-adbc-4c29-a08d-55bb2d07fc6c\") " pod="openshift-authentication/oauth-openshift-558db77b4-g6m6g" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.404045 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jkjrs\" (UniqueName: \"kubernetes.io/projected/ae977eb4-8273-4dab-9e39-80c36ccd63e2-kube-api-access-jkjrs\") pod \"console-f9d7485db-nsj2w\" (UID: \"ae977eb4-8273-4dab-9e39-80c36ccd63e2\") " pod="openshift-console/console-f9d7485db-nsj2w" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.404066 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-x4s7j\" (UniqueName: \"kubernetes.io/projected/c14868fd-4ccf-4779-81e8-dc3b30393f1f-kube-api-access-x4s7j\") pod \"openshift-config-operator-7777fb866f-954dx\" (UID: \"c14868fd-4ccf-4779-81e8-dc3b30393f1f\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-954dx" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.404083 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c44b98ae-cd26-48e0-9c72-725fd64d22f9-serving-cert\") pod \"apiserver-76f77b778f-d4vh5\" (UID: \"c44b98ae-cd26-48e0-9c72-725fd64d22f9\") " pod="openshift-apiserver/apiserver-76f77b778f-d4vh5" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.404102 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/62b979b0-adbc-4c29-a08d-55bb2d07fc6c-audit-dir\") pod \"oauth-openshift-558db77b4-g6m6g\" (UID: \"62b979b0-adbc-4c29-a08d-55bb2d07fc6c\") " pod="openshift-authentication/oauth-openshift-558db77b4-g6m6g" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.404168 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/62b979b0-adbc-4c29-a08d-55bb2d07fc6c-v4-0-config-user-template-error\") pod \"oauth-openshift-558db77b4-g6m6g\" (UID: \"62b979b0-adbc-4c29-a08d-55bb2d07fc6c\") " pod="openshift-authentication/oauth-openshift-558db77b4-g6m6g" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.404215 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/ae977eb4-8273-4dab-9e39-80c36ccd63e2-console-oauth-config\") pod \"console-f9d7485db-nsj2w\" (UID: 
\"ae977eb4-8273-4dab-9e39-80c36ccd63e2\") " pod="openshift-console/console-f9d7485db-nsj2w" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.404237 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/ae977eb4-8273-4dab-9e39-80c36ccd63e2-trusted-ca-bundle\") pod \"console-f9d7485db-nsj2w\" (UID: \"ae977eb4-8273-4dab-9e39-80c36ccd63e2\") " pod="openshift-console/console-f9d7485db-nsj2w" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.404260 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/ae977eb4-8273-4dab-9e39-80c36ccd63e2-oauth-serving-cert\") pod \"console-f9d7485db-nsj2w\" (UID: \"ae977eb4-8273-4dab-9e39-80c36ccd63e2\") " pod="openshift-console/console-f9d7485db-nsj2w" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.404282 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"dns-operator-dockercfg-9mqw5" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.404284 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/2d43d07f-3f14-4be9-9801-d40bda91eb2e-serving-cert\") pod \"console-operator-58897d9998-zk2x8\" (UID: \"2d43d07f-3f14-4be9-9801-d40bda91eb2e\") " pod="openshift-console-operator/console-operator-58897d9998-zk2x8" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.404333 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/fbac19dc-113c-44e6-8744-445e62ea540d-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-c56h5\" (UID: \"fbac19dc-113c-44e6-8744-445e62ea540d\") " pod="openshift-controller-manager/controller-manager-879f6c89f-c56h5" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.404362 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/f209060d-bbc4-4f07-82e2-1e8b212c1f56-auth-proxy-config\") pod \"machine-approver-56656f9798-ztdwg\" (UID: \"f209060d-bbc4-4f07-82e2-1e8b212c1f56\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-ztdwg" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.404377 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/c44b98ae-cd26-48e0-9c72-725fd64d22f9-etcd-serving-ca\") pod \"apiserver-76f77b778f-d4vh5\" (UID: \"c44b98ae-cd26-48e0-9c72-725fd64d22f9\") " pod="openshift-apiserver/apiserver-76f77b778f-d4vh5" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.404952 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-dockercfg-vw8fw" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.405267 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"openshift-service-ca.crt" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.405452 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"metrics-tls" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.419162 4926 reflector.go:368] Caches populated for *v1.ConfigMap from 
object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-config" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.419591 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ingress-operator/ingress-operator-5b745b69d9-wlvg2"] Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.438688 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-login" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.445057 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-wlvg2" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.445373 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"trusted-ca-bundle" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.445690 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-root-ca.crt" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.445769 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-cm8rb"] Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.446268 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-cm8rb" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.446564 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"kube-root-ca.crt" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.446941 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-rndxt"] Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.447702 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-rndxt" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.449614 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.449785 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-service-ca-operator/service-ca-operator-777779d784-dcbsr"] Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.450906 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-777779d784-dcbsr" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.451631 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-config" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.451772 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-etcd-operator/etcd-operator-b45778765-9ph96"] Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.452483 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-etcd-operator/etcd-operator-b45778765-9ph96" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.452869 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-operator-74547568cd-pzqp7"] Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.453002 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-ocp-branding-template" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.453647 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-pzqp7" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.454182 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"kube-root-ca.crt" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.454580 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver/apiserver-76f77b778f-d4vh5"] Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.455564 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-rwrgv"] Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.456172 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-rwrgv" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.456462 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-j5lpl"] Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.456958 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-j5lpl" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.458105 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-kkqbv"] Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.458651 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-kkqbv" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.459228 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/catalog-operator-68c6474976-zmfpm"] Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.459734 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-zmfpm" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.460365 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-wlnj9"] Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.461654 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-wlnj9" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.463082 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"kube-storage-version-migrator-operator-dockercfg-2bh8d" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.464631 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-5mlfw"] Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.465300 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-5mlfw" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.467719 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ingress/router-default-5444994796-8k4sd"] Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.469538 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress/router-default-5444994796-8k4sd" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.469910 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-h55df"] Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.472133 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-oauth-apiserver/apiserver-7bbb656c7d-7zp2q"] Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.472294 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-h55df" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.472644 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-service-ca/service-ca-9c57cc56f-2l9hr"] Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.473166 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-9c57cc56f-2l9hr" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.480440 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29396790-vw7kn"] Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.481781 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-pdkqk"] Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.482075 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29396790-vw7kn" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.482744 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-944rj"] Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.491333 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"serving-cert" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.492838 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication-operator/authentication-operator-69f744f599-wvt6w"] Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.494549 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["hostpath-provisioner/csi-hostpathplugin-p5j7r"] Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.500173 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ingress-canary/ingress-canary-k24qp"] Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.500708 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-p5j7r" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.501407 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-g6m6g"] Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.501597 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-k24qp" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.501654 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-pcmsx"] Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.501670 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-controller-84d6567774-t4dgw"] Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.501788 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/machine-api-operator-5694c8668f-jkqsx"] Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.503822 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-config-operator/openshift-config-operator-7777fb866f-954dx"] Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.504492 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"openshift-service-ca.crt" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.505660 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0dc0778b-7907-42c7-a656-22c517a50c3a-config\") pod \"authentication-operator-69f744f599-wvt6w\" (UID: \"0dc0778b-7907-42c7-a656-22c517a50c3a\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-wvt6w" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.506253 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/831547cf-3937-4d20-83b8-9570e309f0b3-trusted-ca-bundle\") pod \"apiserver-7bbb656c7d-7zp2q\" (UID: \"831547cf-3937-4d20-83b8-9570e309f0b3\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-7zp2q" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.506418 4926 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/831547cf-3937-4d20-83b8-9570e309f0b3-encryption-config\") pod \"apiserver-7bbb656c7d-7zp2q\" (UID: \"831547cf-3937-4d20-83b8-9570e309f0b3\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-7zp2q" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.506568 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/62b979b0-adbc-4c29-a08d-55bb2d07fc6c-v4-0-config-system-serving-cert\") pod \"oauth-openshift-558db77b4-g6m6g\" (UID: \"62b979b0-adbc-4c29-a08d-55bb2d07fc6c\") " pod="openshift-authentication/oauth-openshift-558db77b4-g6m6g" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.506696 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jkjrs\" (UniqueName: \"kubernetes.io/projected/ae977eb4-8273-4dab-9e39-80c36ccd63e2-kube-api-access-jkjrs\") pod \"console-f9d7485db-nsj2w\" (UID: \"ae977eb4-8273-4dab-9e39-80c36ccd63e2\") " pod="openshift-console/console-f9d7485db-nsj2w" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.506862 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-x4s7j\" (UniqueName: \"kubernetes.io/projected/c14868fd-4ccf-4779-81e8-dc3b30393f1f-kube-api-access-x4s7j\") pod \"openshift-config-operator-7777fb866f-954dx\" (UID: \"c14868fd-4ccf-4779-81e8-dc3b30393f1f\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-954dx" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.508716 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c44b98ae-cd26-48e0-9c72-725fd64d22f9-serving-cert\") pod \"apiserver-76f77b778f-d4vh5\" (UID: \"c44b98ae-cd26-48e0-9c72-725fd64d22f9\") " pod="openshift-apiserver/apiserver-76f77b778f-d4vh5" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.508876 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/62b979b0-adbc-4c29-a08d-55bb2d07fc6c-audit-dir\") pod \"oauth-openshift-558db77b4-g6m6g\" (UID: \"62b979b0-adbc-4c29-a08d-55bb2d07fc6c\") " pod="openshift-authentication/oauth-openshift-558db77b4-g6m6g" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.509040 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/731bcd4c-12b4-408c-a30b-dd7ccd6a0712-metrics-tls\") pod \"dns-operator-744455d44c-jb7x7\" (UID: \"731bcd4c-12b4-408c-a30b-dd7ccd6a0712\") " pod="openshift-dns-operator/dns-operator-744455d44c-jb7x7" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.509130 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-s76s5\" (UniqueName: \"kubernetes.io/projected/261fec63-db4b-4580-a128-3cc51da2cc93-kube-api-access-s76s5\") pod \"olm-operator-6b444d44fb-brhnh\" (UID: \"261fec63-db4b-4580-a128-3cc51da2cc93\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-brhnh" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.509251 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0dc0778b-7907-42c7-a656-22c517a50c3a-config\") pod \"authentication-operator-69f744f599-wvt6w\" (UID: 
\"0dc0778b-7907-42c7-a656-22c517a50c3a\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-wvt6w" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.509311 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/62b979b0-adbc-4c29-a08d-55bb2d07fc6c-audit-dir\") pod \"oauth-openshift-558db77b4-g6m6g\" (UID: \"62b979b0-adbc-4c29-a08d-55bb2d07fc6c\") " pod="openshift-authentication/oauth-openshift-558db77b4-g6m6g" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.507050 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-c56h5"] Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.509348 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns-operator/dns-operator-744455d44c-jb7x7"] Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.509261 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/62b979b0-adbc-4c29-a08d-55bb2d07fc6c-v4-0-config-user-template-error\") pod \"oauth-openshift-558db77b4-g6m6g\" (UID: \"62b979b0-adbc-4c29-a08d-55bb2d07fc6c\") " pod="openshift-authentication/oauth-openshift-558db77b4-g6m6g" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.509791 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/ae977eb4-8273-4dab-9e39-80c36ccd63e2-console-oauth-config\") pod \"console-f9d7485db-nsj2w\" (UID: \"ae977eb4-8273-4dab-9e39-80c36ccd63e2\") " pod="openshift-console/console-f9d7485db-nsj2w" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.509950 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/ae977eb4-8273-4dab-9e39-80c36ccd63e2-trusted-ca-bundle\") pod \"console-f9d7485db-nsj2w\" (UID: \"ae977eb4-8273-4dab-9e39-80c36ccd63e2\") " pod="openshift-console/console-f9d7485db-nsj2w" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.510080 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/ae977eb4-8273-4dab-9e39-80c36ccd63e2-oauth-serving-cert\") pod \"console-f9d7485db-nsj2w\" (UID: \"ae977eb4-8273-4dab-9e39-80c36ccd63e2\") " pod="openshift-console/console-f9d7485db-nsj2w" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.510182 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7w54x\" (UniqueName: \"kubernetes.io/projected/3711dcd6-7ec3-4610-857d-e24f38c6e986-kube-api-access-7w54x\") pod \"packageserver-d55dfcdfc-rwrgv\" (UID: \"3711dcd6-7ec3-4610-857d-e24f38c6e986\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-rwrgv" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.510306 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/40a4057a-5a42-463f-aeb2-995754abca81-webhook-certs\") pod \"multus-admission-controller-857f4d67dd-js7td\" (UID: \"40a4057a-5a42-463f-aeb2-995754abca81\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-js7td" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.510396 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"serving-cert\" (UniqueName: \"kubernetes.io/secret/2d43d07f-3f14-4be9-9801-d40bda91eb2e-serving-cert\") pod \"console-operator-58897d9998-zk2x8\" (UID: \"2d43d07f-3f14-4be9-9801-d40bda91eb2e\") " pod="openshift-console-operator/console-operator-58897d9998-zk2x8" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.510485 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/831547cf-3937-4d20-83b8-9570e309f0b3-trusted-ca-bundle\") pod \"apiserver-7bbb656c7d-7zp2q\" (UID: \"831547cf-3937-4d20-83b8-9570e309f0b3\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-7zp2q" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.510624 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/fbac19dc-113c-44e6-8744-445e62ea540d-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-c56h5\" (UID: \"fbac19dc-113c-44e6-8744-445e62ea540d\") " pod="openshift-controller-manager/controller-manager-879f6c89f-c56h5" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.510757 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bw2sr\" (UniqueName: \"kubernetes.io/projected/731bcd4c-12b4-408c-a30b-dd7ccd6a0712-kube-api-access-bw2sr\") pod \"dns-operator-744455d44c-jb7x7\" (UID: \"731bcd4c-12b4-408c-a30b-dd7ccd6a0712\") " pod="openshift-dns-operator/dns-operator-744455d44c-jb7x7" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.510862 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/3e81d5b7-f92f-4de5-bb55-512620175698-etcd-client\") pod \"etcd-operator-b45778765-9ph96\" (UID: \"3e81d5b7-f92f-4de5-bb55-512620175698\") " pod="openshift-etcd-operator/etcd-operator-b45778765-9ph96" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.510979 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/32923a07-7dfd-47a6-9b84-6cf7ebd329fa-signing-key\") pod \"service-ca-9c57cc56f-2l9hr\" (UID: \"32923a07-7dfd-47a6-9b84-6cf7ebd329fa\") " pod="openshift-service-ca/service-ca-9c57cc56f-2l9hr" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.511112 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/f209060d-bbc4-4f07-82e2-1e8b212c1f56-auth-proxy-config\") pod \"machine-approver-56656f9798-ztdwg\" (UID: \"f209060d-bbc4-4f07-82e2-1e8b212c1f56\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-ztdwg" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.511220 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/c44b98ae-cd26-48e0-9c72-725fd64d22f9-etcd-serving-ca\") pod \"apiserver-76f77b778f-d4vh5\" (UID: \"c44b98ae-cd26-48e0-9c72-725fd64d22f9\") " pod="openshift-apiserver/apiserver-76f77b778f-d4vh5" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.511332 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-thzbk\" (UniqueName: \"kubernetes.io/projected/e0882887-a6d9-4aac-a7d7-c14b934298e2-kube-api-access-thzbk\") pod \"machine-api-operator-5694c8668f-jkqsx\" (UID: 
\"e0882887-a6d9-4aac-a7d7-c14b934298e2\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-jkqsx" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.511448 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/e0ad330e-054d-4f9f-89e1-2e18fca1e66a-trusted-ca\") pod \"ingress-operator-5b745b69d9-wlvg2\" (UID: \"e0ad330e-054d-4f9f-89e1-2e18fca1e66a\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-wlvg2" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.511562 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-46497\" (UniqueName: \"kubernetes.io/projected/32923a07-7dfd-47a6-9b84-6cf7ebd329fa-kube-api-access-46497\") pod \"service-ca-9c57cc56f-2l9hr\" (UID: \"32923a07-7dfd-47a6-9b84-6cf7ebd329fa\") " pod="openshift-service-ca/service-ca-9c57cc56f-2l9hr" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.511653 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/c14868fd-4ccf-4779-81e8-dc3b30393f1f-available-featuregates\") pod \"openshift-config-operator-7777fb866f-954dx\" (UID: \"c14868fd-4ccf-4779-81e8-dc3b30393f1f\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-954dx" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.511738 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/ae977eb4-8273-4dab-9e39-80c36ccd63e2-service-ca\") pod \"console-f9d7485db-nsj2w\" (UID: \"ae977eb4-8273-4dab-9e39-80c36ccd63e2\") " pod="openshift-console/console-f9d7485db-nsj2w" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.511814 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/e0882887-a6d9-4aac-a7d7-c14b934298e2-images\") pod \"machine-api-operator-5694c8668f-jkqsx\" (UID: \"e0882887-a6d9-4aac-a7d7-c14b934298e2\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-jkqsx" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.511913 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/3e81d5b7-f92f-4de5-bb55-512620175698-etcd-ca\") pod \"etcd-operator-b45778765-9ph96\" (UID: \"3e81d5b7-f92f-4de5-bb55-512620175698\") " pod="openshift-etcd-operator/etcd-operator-b45778765-9ph96" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.512008 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c44b98ae-cd26-48e0-9c72-725fd64d22f9-trusted-ca-bundle\") pod \"apiserver-76f77b778f-d4vh5\" (UID: \"c44b98ae-cd26-48e0-9c72-725fd64d22f9\") " pod="openshift-apiserver/apiserver-76f77b778f-d4vh5" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.512093 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/831547cf-3937-4d20-83b8-9570e309f0b3-audit-policies\") pod \"apiserver-7bbb656c7d-7zp2q\" (UID: \"831547cf-3937-4d20-83b8-9570e309f0b3\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-7zp2q" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.512181 4926 reconciler_common.go:218] "operationExecutor.MountVolume 
started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/62b979b0-adbc-4c29-a08d-55bb2d07fc6c-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-558db77b4-g6m6g\" (UID: \"62b979b0-adbc-4c29-a08d-55bb2d07fc6c\") " pod="openshift-authentication/oauth-openshift-558db77b4-g6m6g" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.512264 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3e81d5b7-f92f-4de5-bb55-512620175698-config\") pod \"etcd-operator-b45778765-9ph96\" (UID: \"3e81d5b7-f92f-4de5-bb55-512620175698\") " pod="openshift-etcd-operator/etcd-operator-b45778765-9ph96" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.512337 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e7bb5a83-ce79-42db-a6d0-996b1405d668-serving-cert\") pod \"openshift-controller-manager-operator-756b6f6bc6-dl4z4\" (UID: \"e7bb5a83-ce79-42db-a6d0-996b1405d668\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-dl4z4" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.512409 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9464da92-3307-4d40-8643-133ecf84d523-config\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-5mlfw\" (UID: \"9464da92-3307-4d40-8643-133ecf84d523\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-5mlfw" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.512487 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/32923a07-7dfd-47a6-9b84-6cf7ebd329fa-signing-cabundle\") pod \"service-ca-9c57cc56f-2l9hr\" (UID: \"32923a07-7dfd-47a6-9b84-6cf7ebd329fa\") " pod="openshift-service-ca/service-ca-9c57cc56f-2l9hr" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.512577 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2mfz\" (UniqueName: \"kubernetes.io/projected/0dc0778b-7907-42c7-a656-22c517a50c3a-kube-api-access-s2mfz\") pod \"authentication-operator-69f744f599-wvt6w\" (UID: \"0dc0778b-7907-42c7-a656-22c517a50c3a\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-wvt6w" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.512741 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nnxzd\" (UniqueName: \"kubernetes.io/projected/3e81d5b7-f92f-4de5-bb55-512620175698-kube-api-access-nnxzd\") pod \"etcd-operator-b45778765-9ph96\" (UID: \"3e81d5b7-f92f-4de5-bb55-512620175698\") " pod="openshift-etcd-operator/etcd-operator-b45778765-9ph96" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.512824 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/05be3235-93e8-4d5c-8f34-9c47f694bb1b-proxy-tls\") pod \"machine-config-operator-74547568cd-pzqp7\" (UID: \"05be3235-93e8-4d5c-8f34-9c47f694bb1b\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-pzqp7" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.512913 4926 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/261fec63-db4b-4580-a128-3cc51da2cc93-srv-cert\") pod \"olm-operator-6b444d44fb-brhnh\" (UID: \"261fec63-db4b-4580-a128-3cc51da2cc93\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-brhnh" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.512992 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f9451a32-8743-493b-87a2-e7473354f0a4-config\") pod \"service-ca-operator-777779d784-dcbsr\" (UID: \"f9451a32-8743-493b-87a2-e7473354f0a4\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-dcbsr" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.513078 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-66l87\" (UniqueName: \"kubernetes.io/projected/62b979b0-adbc-4c29-a08d-55bb2d07fc6c-kube-api-access-66l87\") pod \"oauth-openshift-558db77b4-g6m6g\" (UID: \"62b979b0-adbc-4c29-a08d-55bb2d07fc6c\") " pod="openshift-authentication/oauth-openshift-558db77b4-g6m6g" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.513170 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/e0ad330e-054d-4f9f-89e1-2e18fca1e66a-metrics-tls\") pod \"ingress-operator-5b745b69d9-wlvg2\" (UID: \"e0ad330e-054d-4f9f-89e1-2e18fca1e66a\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-wlvg2" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.511525 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/ae977eb4-8273-4dab-9e39-80c36ccd63e2-oauth-serving-cert\") pod \"console-f9d7485db-nsj2w\" (UID: \"ae977eb4-8273-4dab-9e39-80c36ccd63e2\") " pod="openshift-console/console-f9d7485db-nsj2w" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.513318 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w7wqx\" (UniqueName: \"kubernetes.io/projected/6e0f47bd-848d-4619-a2f9-eb503d04e2e0-kube-api-access-w7wqx\") pod \"downloads-7954f5f757-8l5sv\" (UID: \"6e0f47bd-848d-4619-a2f9-eb503d04e2e0\") " pod="openshift-console/downloads-7954f5f757-8l5sv" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.533966 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/2d43d07f-3f14-4be9-9801-d40bda91eb2e-serving-cert\") pod \"console-operator-58897d9998-zk2x8\" (UID: \"2d43d07f-3f14-4be9-9801-d40bda91eb2e\") " pod="openshift-console-operator/console-operator-58897d9998-zk2x8" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.534335 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/831547cf-3937-4d20-83b8-9570e309f0b3-encryption-config\") pod \"apiserver-7bbb656c7d-7zp2q\" (UID: \"831547cf-3937-4d20-83b8-9570e309f0b3\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-7zp2q" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.534791 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c44b98ae-cd26-48e0-9c72-725fd64d22f9-serving-cert\") pod \"apiserver-76f77b778f-d4vh5\" (UID: \"c44b98ae-cd26-48e0-9c72-725fd64d22f9\") " 
pod="openshift-apiserver/apiserver-76f77b778f-d4vh5" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.534957 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/ae977eb4-8273-4dab-9e39-80c36ccd63e2-trusted-ca-bundle\") pod \"console-f9d7485db-nsj2w\" (UID: \"ae977eb4-8273-4dab-9e39-80c36ccd63e2\") " pod="openshift-console/console-f9d7485db-nsj2w" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.514975 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/c44b98ae-cd26-48e0-9c72-725fd64d22f9-node-pullsecrets\") pod \"apiserver-76f77b778f-d4vh5\" (UID: \"c44b98ae-cd26-48e0-9c72-725fd64d22f9\") " pod="openshift-apiserver/apiserver-76f77b778f-d4vh5" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.535760 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/62b979b0-adbc-4c29-a08d-55bb2d07fc6c-v4-0-config-user-template-error\") pod \"oauth-openshift-558db77b4-g6m6g\" (UID: \"62b979b0-adbc-4c29-a08d-55bb2d07fc6c\") " pod="openshift-authentication/oauth-openshift-558db77b4-g6m6g" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.535829 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/dac98899-4c89-48b4-bf2d-0aee1eb95eff-samples-operator-tls\") pod \"cluster-samples-operator-665b6dd947-944rj\" (UID: \"dac98899-4c89-48b4-bf2d-0aee1eb95eff\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-944rj" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.537016 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wk7sn\" (UniqueName: \"kubernetes.io/projected/dac98899-4c89-48b4-bf2d-0aee1eb95eff-kube-api-access-wk7sn\") pod \"cluster-samples-operator-665b6dd947-944rj\" (UID: \"dac98899-4c89-48b4-bf2d-0aee1eb95eff\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-944rj" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.537045 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2d43d07f-3f14-4be9-9801-d40bda91eb2e-config\") pod \"console-operator-58897d9998-zk2x8\" (UID: \"2d43d07f-3f14-4be9-9801-d40bda91eb2e\") " pod="openshift-console-operator/console-operator-58897d9998-zk2x8" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.537067 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/c44b98ae-cd26-48e0-9c72-725fd64d22f9-audit-dir\") pod \"apiserver-76f77b778f-d4vh5\" (UID: \"c44b98ae-cd26-48e0-9c72-725fd64d22f9\") " pod="openshift-apiserver/apiserver-76f77b778f-d4vh5" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.537107 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/05be3235-93e8-4d5c-8f34-9c47f694bb1b-images\") pod \"machine-config-operator-74547568cd-pzqp7\" (UID: \"05be3235-93e8-4d5c-8f34-9c47f694bb1b\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-pzqp7" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.537130 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for 
volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/3e81d5b7-f92f-4de5-bb55-512620175698-serving-cert\") pod \"etcd-operator-b45778765-9ph96\" (UID: \"3e81d5b7-f92f-4de5-bb55-512620175698\") " pod="openshift-etcd-operator/etcd-operator-b45778765-9ph96" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.537178 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/831547cf-3937-4d20-83b8-9570e309f0b3-etcd-client\") pod \"apiserver-7bbb656c7d-7zp2q\" (UID: \"831547cf-3937-4d20-83b8-9570e309f0b3\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-7zp2q" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.537201 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/831547cf-3937-4d20-83b8-9570e309f0b3-audit-dir\") pod \"apiserver-7bbb656c7d-7zp2q\" (UID: \"831547cf-3937-4d20-83b8-9570e309f0b3\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-7zp2q" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.537314 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/62b979b0-adbc-4c29-a08d-55bb2d07fc6c-v4-0-config-system-session\") pod \"oauth-openshift-558db77b4-g6m6g\" (UID: \"62b979b0-adbc-4c29-a08d-55bb2d07fc6c\") " pod="openshift-authentication/oauth-openshift-558db77b4-g6m6g" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.537333 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f9451a32-8743-493b-87a2-e7473354f0a4-serving-cert\") pod \"service-ca-operator-777779d784-dcbsr\" (UID: \"f9451a32-8743-493b-87a2-e7473354f0a4\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-dcbsr" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.537376 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e7bb5a83-ce79-42db-a6d0-996b1405d668-config\") pod \"openshift-controller-manager-operator-756b6f6bc6-dl4z4\" (UID: \"e7bb5a83-ce79-42db-a6d0-996b1405d668\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-dl4z4" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.537398 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/9464da92-3307-4d40-8643-133ecf84d523-kube-api-access\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-5mlfw\" (UID: \"9464da92-3307-4d40-8643-133ecf84d523\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-5mlfw" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.537571 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/fbac19dc-113c-44e6-8744-445e62ea540d-config\") pod \"controller-manager-879f6c89f-c56h5\" (UID: \"fbac19dc-113c-44e6-8744-445e62ea540d\") " pod="openshift-controller-manager/controller-manager-879f6c89f-c56h5" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.537597 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/fbac19dc-113c-44e6-8744-445e62ea540d-serving-cert\") pod 
\"controller-manager-879f6c89f-c56h5\" (UID: \"fbac19dc-113c-44e6-8744-445e62ea540d\") " pod="openshift-controller-manager/controller-manager-879f6c89f-c56h5" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.537618 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5g4xh\" (UniqueName: \"kubernetes.io/projected/c44b98ae-cd26-48e0-9c72-725fd64d22f9-kube-api-access-5g4xh\") pod \"apiserver-76f77b778f-d4vh5\" (UID: \"c44b98ae-cd26-48e0-9c72-725fd64d22f9\") " pod="openshift-apiserver/apiserver-76f77b778f-d4vh5" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.537657 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/62b979b0-adbc-4c29-a08d-55bb2d07fc6c-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-558db77b4-g6m6g\" (UID: \"62b979b0-adbc-4c29-a08d-55bb2d07fc6c\") " pod="openshift-authentication/oauth-openshift-558db77b4-g6m6g" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.537720 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/e0882887-a6d9-4aac-a7d7-c14b934298e2-machine-api-operator-tls\") pod \"machine-api-operator-5694c8668f-jkqsx\" (UID: \"e0882887-a6d9-4aac-a7d7-c14b934298e2\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-jkqsx" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.537744 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pp7tn\" (UniqueName: \"kubernetes.io/projected/e0ad330e-054d-4f9f-89e1-2e18fca1e66a-kube-api-access-pp7tn\") pod \"ingress-operator-5b745b69d9-wlvg2\" (UID: \"e0ad330e-054d-4f9f-89e1-2e18fca1e66a\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-wlvg2" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.537799 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/80a51b2f-5a67-4cb4-ab41-09516cec7e4d-config\") pod \"openshift-apiserver-operator-796bbdcf4f-rndxt\" (UID: \"80a51b2f-5a67-4cb4-ab41-09516cec7e4d\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-rndxt" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.537826 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/ce9a30da-be0f-49f1-8e0b-40bb1fba706a-client-ca\") pod \"route-controller-manager-6576b87f9c-pdkqk\" (UID: \"ce9a30da-be0f-49f1-8e0b-40bb1fba706a\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-pdkqk" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.537915 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-c8wkh\" (UniqueName: \"kubernetes.io/projected/f209060d-bbc4-4f07-82e2-1e8b212c1f56-kube-api-access-c8wkh\") pod \"machine-approver-56656f9798-ztdwg\" (UID: \"f209060d-bbc4-4f07-82e2-1e8b212c1f56\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-ztdwg" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.537935 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0dc0778b-7907-42c7-a656-22c517a50c3a-serving-cert\") pod \"authentication-operator-69f744f599-wvt6w\" (UID: 
\"0dc0778b-7907-42c7-a656-22c517a50c3a\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-wvt6w" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.537988 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/0dc0778b-7907-42c7-a656-22c517a50c3a-service-ca-bundle\") pod \"authentication-operator-69f744f599-wvt6w\" (UID: \"0dc0778b-7907-42c7-a656-22c517a50c3a\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-wvt6w" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.538006 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/05be3235-93e8-4d5c-8f34-9c47f694bb1b-auth-proxy-config\") pod \"machine-config-operator-74547568cd-pzqp7\" (UID: \"05be3235-93e8-4d5c-8f34-9c47f694bb1b\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-pzqp7" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.538045 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/e0ad330e-054d-4f9f-89e1-2e18fca1e66a-bound-sa-token\") pod \"ingress-operator-5b745b69d9-wlvg2\" (UID: \"e0ad330e-054d-4f9f-89e1-2e18fca1e66a\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-wlvg2" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.538067 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/831547cf-3937-4d20-83b8-9570e309f0b3-etcd-serving-ca\") pod \"apiserver-7bbb656c7d-7zp2q\" (UID: \"831547cf-3937-4d20-83b8-9570e309f0b3\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-7zp2q" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.538118 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/ae977eb4-8273-4dab-9e39-80c36ccd63e2-console-serving-cert\") pod \"console-f9d7485db-nsj2w\" (UID: \"ae977eb4-8273-4dab-9e39-80c36ccd63e2\") " pod="openshift-console/console-f9d7485db-nsj2w" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.538135 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/c44b98ae-cd26-48e0-9c72-725fd64d22f9-image-import-ca\") pod \"apiserver-76f77b778f-d4vh5\" (UID: \"c44b98ae-cd26-48e0-9c72-725fd64d22f9\") " pod="openshift-apiserver/apiserver-76f77b778f-d4vh5" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.538021 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/fbac19dc-113c-44e6-8744-445e62ea540d-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-c56h5\" (UID: \"fbac19dc-113c-44e6-8744-445e62ea540d\") " pod="openshift-controller-manager/controller-manager-879f6c89f-c56h5" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.538190 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/62b979b0-adbc-4c29-a08d-55bb2d07fc6c-audit-policies\") pod \"oauth-openshift-558db77b4-g6m6g\" (UID: \"62b979b0-adbc-4c29-a08d-55bb2d07fc6c\") " pod="openshift-authentication/oauth-openshift-558db77b4-g6m6g" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.538252 4926 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ce9a30da-be0f-49f1-8e0b-40bb1fba706a-config\") pod \"route-controller-manager-6576b87f9c-pdkqk\" (UID: \"ce9a30da-be0f-49f1-8e0b-40bb1fba706a\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-pdkqk" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.538293 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/62b979b0-adbc-4c29-a08d-55bb2d07fc6c-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-558db77b4-g6m6g\" (UID: \"62b979b0-adbc-4c29-a08d-55bb2d07fc6c\") " pod="openshift-authentication/oauth-openshift-558db77b4-g6m6g" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.538329 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-njc5s\" (UniqueName: \"kubernetes.io/projected/2d43d07f-3f14-4be9-9801-d40bda91eb2e-kube-api-access-njc5s\") pod \"console-operator-58897d9998-zk2x8\" (UID: \"2d43d07f-3f14-4be9-9801-d40bda91eb2e\") " pod="openshift-console-operator/console-operator-58897d9998-zk2x8" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.538359 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c14868fd-4ccf-4779-81e8-dc3b30393f1f-serving-cert\") pod \"openshift-config-operator-7777fb866f-954dx\" (UID: \"c14868fd-4ccf-4779-81e8-dc3b30393f1f\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-954dx" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.538388 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/c44b98ae-cd26-48e0-9c72-725fd64d22f9-audit\") pod \"apiserver-76f77b778f-d4vh5\" (UID: \"c44b98ae-cd26-48e0-9c72-725fd64d22f9\") " pod="openshift-apiserver/apiserver-76f77b778f-d4vh5" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.538417 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/62b979b0-adbc-4c29-a08d-55bb2d07fc6c-v4-0-config-user-template-login\") pod \"oauth-openshift-558db77b4-g6m6g\" (UID: \"62b979b0-adbc-4c29-a08d-55bb2d07fc6c\") " pod="openshift-authentication/oauth-openshift-558db77b4-g6m6g" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.538442 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ce9a30da-be0f-49f1-8e0b-40bb1fba706a-serving-cert\") pod \"route-controller-manager-6576b87f9c-pdkqk\" (UID: \"ce9a30da-be0f-49f1-8e0b-40bb1fba706a\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-pdkqk" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.538463 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-prkxl\" (UniqueName: \"kubernetes.io/projected/e7bb5a83-ce79-42db-a6d0-996b1405d668-kube-api-access-prkxl\") pod \"openshift-controller-manager-operator-756b6f6bc6-dl4z4\" (UID: \"e7bb5a83-ce79-42db-a6d0-996b1405d668\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-dl4z4" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.538538 4926 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"kube-api-access-j696v\" (UniqueName: \"kubernetes.io/projected/fbac19dc-113c-44e6-8744-445e62ea540d-kube-api-access-j696v\") pod \"controller-manager-879f6c89f-c56h5\" (UID: \"fbac19dc-113c-44e6-8744-445e62ea540d\") " pod="openshift-controller-manager/controller-manager-879f6c89f-c56h5" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.538561 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e0882887-a6d9-4aac-a7d7-c14b934298e2-config\") pod \"machine-api-operator-5694c8668f-jkqsx\" (UID: \"e0882887-a6d9-4aac-a7d7-c14b934298e2\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-jkqsx" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.538620 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/80a51b2f-5a67-4cb4-ab41-09516cec7e4d-serving-cert\") pod \"openshift-apiserver-operator-796bbdcf4f-rndxt\" (UID: \"80a51b2f-5a67-4cb4-ab41-09516cec7e4d\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-rndxt" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.538639 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gmr89\" (UniqueName: \"kubernetes.io/projected/f9451a32-8743-493b-87a2-e7473354f0a4-kube-api-access-gmr89\") pod \"service-ca-operator-777779d784-dcbsr\" (UID: \"f9451a32-8743-493b-87a2-e7473354f0a4\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-dcbsr" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.538680 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/c44b98ae-cd26-48e0-9c72-725fd64d22f9-etcd-client\") pod \"apiserver-76f77b778f-d4vh5\" (UID: \"c44b98ae-cd26-48e0-9c72-725fd64d22f9\") " pod="openshift-apiserver/apiserver-76f77b778f-d4vh5" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.538700 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/831547cf-3937-4d20-83b8-9570e309f0b3-serving-cert\") pod \"apiserver-7bbb656c7d-7zp2q\" (UID: \"831547cf-3937-4d20-83b8-9570e309f0b3\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-7zp2q" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.538743 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hd54r\" (UniqueName: \"kubernetes.io/projected/831547cf-3937-4d20-83b8-9570e309f0b3-kube-api-access-hd54r\") pod \"apiserver-7bbb656c7d-7zp2q\" (UID: \"831547cf-3937-4d20-83b8-9570e309f0b3\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-7zp2q" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.538765 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-p9wb7\" (UniqueName: \"kubernetes.io/projected/05be3235-93e8-4d5c-8f34-9c47f694bb1b-kube-api-access-p9wb7\") pod \"machine-config-operator-74547568cd-pzqp7\" (UID: \"05be3235-93e8-4d5c-8f34-9c47f694bb1b\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-pzqp7" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.538780 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-policies\" (UniqueName: 
\"kubernetes.io/configmap/62b979b0-adbc-4c29-a08d-55bb2d07fc6c-audit-policies\") pod \"oauth-openshift-558db77b4-g6m6g\" (UID: \"62b979b0-adbc-4c29-a08d-55bb2d07fc6c\") " pod="openshift-authentication/oauth-openshift-558db77b4-g6m6g" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.538784 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/3711dcd6-7ec3-4610-857d-e24f38c6e986-tmpfs\") pod \"packageserver-d55dfcdfc-rwrgv\" (UID: \"3711dcd6-7ec3-4610-857d-e24f38c6e986\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-rwrgv" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.538834 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/0dc0778b-7907-42c7-a656-22c517a50c3a-trusted-ca-bundle\") pod \"authentication-operator-69f744f599-wvt6w\" (UID: \"0dc0778b-7907-42c7-a656-22c517a50c3a\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-wvt6w" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.538863 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/c44b98ae-cd26-48e0-9c72-725fd64d22f9-encryption-config\") pod \"apiserver-76f77b778f-d4vh5\" (UID: \"c44b98ae-cd26-48e0-9c72-725fd64d22f9\") " pod="openshift-apiserver/apiserver-76f77b778f-d4vh5" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.538949 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9464da92-3307-4d40-8643-133ecf84d523-serving-cert\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-5mlfw\" (UID: \"9464da92-3307-4d40-8643-133ecf84d523\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-5mlfw" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.538977 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c44b98ae-cd26-48e0-9c72-725fd64d22f9-config\") pod \"apiserver-76f77b778f-d4vh5\" (UID: \"c44b98ae-cd26-48e0-9c72-725fd64d22f9\") " pod="openshift-apiserver/apiserver-76f77b778f-d4vh5" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.539020 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/62b979b0-adbc-4c29-a08d-55bb2d07fc6c-v4-0-config-system-router-certs\") pod \"oauth-openshift-558db77b4-g6m6g\" (UID: \"62b979b0-adbc-4c29-a08d-55bb2d07fc6c\") " pod="openshift-authentication/oauth-openshift-558db77b4-g6m6g" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.539050 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ch7n5\" (UniqueName: \"kubernetes.io/projected/ce9a30da-be0f-49f1-8e0b-40bb1fba706a-kube-api-access-ch7n5\") pod \"route-controller-manager-6576b87f9c-pdkqk\" (UID: \"ce9a30da-be0f-49f1-8e0b-40bb1fba706a\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-pdkqk" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.539122 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/2d43d07f-3f14-4be9-9801-d40bda91eb2e-trusted-ca\") pod \"console-operator-58897d9998-zk2x8\" (UID: 
\"2d43d07f-3f14-4be9-9801-d40bda91eb2e\") " pod="openshift-console-operator/console-operator-58897d9998-zk2x8" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.539151 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/fbac19dc-113c-44e6-8744-445e62ea540d-client-ca\") pod \"controller-manager-879f6c89f-c56h5\" (UID: \"fbac19dc-113c-44e6-8744-445e62ea540d\") " pod="openshift-controller-manager/controller-manager-879f6c89f-c56h5" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.539169 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/ae977eb4-8273-4dab-9e39-80c36ccd63e2-console-config\") pod \"console-f9d7485db-nsj2w\" (UID: \"ae977eb4-8273-4dab-9e39-80c36ccd63e2\") " pod="openshift-console/console-f9d7485db-nsj2w" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.539254 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/c44b98ae-cd26-48e0-9c72-725fd64d22f9-etcd-serving-ca\") pod \"apiserver-76f77b778f-d4vh5\" (UID: \"c44b98ae-cd26-48e0-9c72-725fd64d22f9\") " pod="openshift-apiserver/apiserver-76f77b778f-d4vh5" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.539189 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/3711dcd6-7ec3-4610-857d-e24f38c6e986-apiservice-cert\") pod \"packageserver-d55dfcdfc-rwrgv\" (UID: \"3711dcd6-7ec3-4610-857d-e24f38c6e986\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-rwrgv" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.539317 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8978x\" (UniqueName: \"kubernetes.io/projected/40a4057a-5a42-463f-aeb2-995754abca81-kube-api-access-8978x\") pod \"multus-admission-controller-857f4d67dd-js7td\" (UID: \"40a4057a-5a42-463f-aeb2-995754abca81\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-js7td" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.539337 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6gmbh\" (UniqueName: \"kubernetes.io/projected/80a51b2f-5a67-4cb4-ab41-09516cec7e4d-kube-api-access-6gmbh\") pod \"openshift-apiserver-operator-796bbdcf4f-rndxt\" (UID: \"80a51b2f-5a67-4cb4-ab41-09516cec7e4d\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-rndxt" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.539372 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/f209060d-bbc4-4f07-82e2-1e8b212c1f56-machine-approver-tls\") pod \"machine-approver-56656f9798-ztdwg\" (UID: \"f209060d-bbc4-4f07-82e2-1e8b212c1f56\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-ztdwg" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.539391 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f209060d-bbc4-4f07-82e2-1e8b212c1f56-config\") pod \"machine-approver-56656f9798-ztdwg\" (UID: \"f209060d-bbc4-4f07-82e2-1e8b212c1f56\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-ztdwg" Nov 22 10:42:03 crc kubenswrapper[4926]: 
I1122 10:42:03.539439 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/3711dcd6-7ec3-4610-857d-e24f38c6e986-webhook-cert\") pod \"packageserver-d55dfcdfc-rwrgv\" (UID: \"3711dcd6-7ec3-4610-857d-e24f38c6e986\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-rwrgv" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.539606 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/62b979b0-adbc-4c29-a08d-55bb2d07fc6c-v4-0-config-system-service-ca\") pod \"oauth-openshift-558db77b4-g6m6g\" (UID: \"62b979b0-adbc-4c29-a08d-55bb2d07fc6c\") " pod="openshift-authentication/oauth-openshift-558db77b4-g6m6g" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.539634 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/261fec63-db4b-4580-a128-3cc51da2cc93-profile-collector-cert\") pod \"olm-operator-6b444d44fb-brhnh\" (UID: \"261fec63-db4b-4580-a128-3cc51da2cc93\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-brhnh" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.539769 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/62b979b0-adbc-4c29-a08d-55bb2d07fc6c-v4-0-config-system-cliconfig\") pod \"oauth-openshift-558db77b4-g6m6g\" (UID: \"62b979b0-adbc-4c29-a08d-55bb2d07fc6c\") " pod="openshift-authentication/oauth-openshift-558db77b4-g6m6g" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.539788 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/62b979b0-adbc-4c29-a08d-55bb2d07fc6c-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-558db77b4-g6m6g\" (UID: \"62b979b0-adbc-4c29-a08d-55bb2d07fc6c\") " pod="openshift-authentication/oauth-openshift-558db77b4-g6m6g" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.539810 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/3e81d5b7-f92f-4de5-bb55-512620175698-etcd-service-ca\") pod \"etcd-operator-b45778765-9ph96\" (UID: \"3e81d5b7-f92f-4de5-bb55-512620175698\") " pod="openshift-etcd-operator/etcd-operator-b45778765-9ph96" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.540829 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/62b979b0-adbc-4c29-a08d-55bb2d07fc6c-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-558db77b4-g6m6g\" (UID: \"62b979b0-adbc-4c29-a08d-55bb2d07fc6c\") " pod="openshift-authentication/oauth-openshift-558db77b4-g6m6g" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.541652 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ce9a30da-be0f-49f1-8e0b-40bb1fba706a-config\") pod \"route-controller-manager-6576b87f9c-pdkqk\" (UID: \"ce9a30da-be0f-49f1-8e0b-40bb1fba706a\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-pdkqk" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.542492 4926 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c44b98ae-cd26-48e0-9c72-725fd64d22f9-trusted-ca-bundle\") pod \"apiserver-76f77b778f-d4vh5\" (UID: \"c44b98ae-cd26-48e0-9c72-725fd64d22f9\") " pod="openshift-apiserver/apiserver-76f77b778f-d4vh5" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.542729 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/831547cf-3937-4d20-83b8-9570e309f0b3-audit-policies\") pod \"apiserver-7bbb656c7d-7zp2q\" (UID: \"831547cf-3937-4d20-83b8-9570e309f0b3\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-7zp2q" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.542747 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/62b979b0-adbc-4c29-a08d-55bb2d07fc6c-v4-0-config-system-serving-cert\") pod \"oauth-openshift-558db77b4-g6m6g\" (UID: \"62b979b0-adbc-4c29-a08d-55bb2d07fc6c\") " pod="openshift-authentication/oauth-openshift-558db77b4-g6m6g" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.512457 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/c14868fd-4ccf-4779-81e8-dc3b30393f1f-available-featuregates\") pod \"openshift-config-operator-7777fb866f-954dx\" (UID: \"c14868fd-4ccf-4779-81e8-dc3b30393f1f\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-954dx" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.543849 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/62b979b0-adbc-4c29-a08d-55bb2d07fc6c-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-558db77b4-g6m6g\" (UID: \"62b979b0-adbc-4c29-a08d-55bb2d07fc6c\") " pod="openshift-authentication/oauth-openshift-558db77b4-g6m6g" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.544430 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/dac98899-4c89-48b4-bf2d-0aee1eb95eff-samples-operator-tls\") pod \"cluster-samples-operator-665b6dd947-944rj\" (UID: \"dac98899-4c89-48b4-bf2d-0aee1eb95eff\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-944rj" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.536789 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/ae977eb4-8273-4dab-9e39-80c36ccd63e2-service-ca\") pod \"console-f9d7485db-nsj2w\" (UID: \"ae977eb4-8273-4dab-9e39-80c36ccd63e2\") " pod="openshift-console/console-f9d7485db-nsj2w" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.544719 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c44b98ae-cd26-48e0-9c72-725fd64d22f9-config\") pod \"apiserver-76f77b778f-d4vh5\" (UID: \"c44b98ae-cd26-48e0-9c72-725fd64d22f9\") " pod="openshift-apiserver/apiserver-76f77b778f-d4vh5" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.545073 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"config" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.545299 4926 operation_generator.go:637] "MountVolume.SetUp succeeded 
for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/c44b98ae-cd26-48e0-9c72-725fd64d22f9-encryption-config\") pod \"apiserver-76f77b778f-d4vh5\" (UID: \"c44b98ae-cd26-48e0-9c72-725fd64d22f9\") " pod="openshift-apiserver/apiserver-76f77b778f-d4vh5" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.545318 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.545347 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2d43d07f-3f14-4be9-9801-d40bda91eb2e-config\") pod \"console-operator-58897d9998-zk2x8\" (UID: \"2d43d07f-3f14-4be9-9801-d40bda91eb2e\") " pod="openshift-console-operator/console-operator-58897d9998-zk2x8" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.545408 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/c44b98ae-cd26-48e0-9c72-725fd64d22f9-audit-dir\") pod \"apiserver-76f77b778f-d4vh5\" (UID: \"c44b98ae-cd26-48e0-9c72-725fd64d22f9\") " pod="openshift-apiserver/apiserver-76f77b778f-d4vh5" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.512351 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/f209060d-bbc4-4f07-82e2-1e8b212c1f56-auth-proxy-config\") pod \"machine-approver-56656f9798-ztdwg\" (UID: \"f209060d-bbc4-4f07-82e2-1e8b212c1f56\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-ztdwg" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.546647 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/831547cf-3937-4d20-83b8-9570e309f0b3-audit-dir\") pod \"apiserver-7bbb656c7d-7zp2q\" (UID: \"831547cf-3937-4d20-83b8-9570e309f0b3\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-7zp2q" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.546870 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/2d43d07f-3f14-4be9-9801-d40bda91eb2e-trusted-ca\") pod \"console-operator-58897d9998-zk2x8\" (UID: \"2d43d07f-3f14-4be9-9801-d40bda91eb2e\") " pod="openshift-console-operator/console-operator-58897d9998-zk2x8" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.546912 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/ae977eb4-8273-4dab-9e39-80c36ccd63e2-console-oauth-config\") pod \"console-f9d7485db-nsj2w\" (UID: \"ae977eb4-8273-4dab-9e39-80c36ccd63e2\") " pod="openshift-console/console-f9d7485db-nsj2w" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.547180 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/c44b98ae-cd26-48e0-9c72-725fd64d22f9-audit\") pod \"apiserver-76f77b778f-d4vh5\" (UID: \"c44b98ae-cd26-48e0-9c72-725fd64d22f9\") " pod="openshift-apiserver/apiserver-76f77b778f-d4vh5" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.547588 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console-operator/console-operator-58897d9998-zk2x8"] Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.547826 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: 
\"kubernetes.io/configmap/f209060d-bbc4-4f07-82e2-1e8b212c1f56-config\") pod \"machine-approver-56656f9798-ztdwg\" (UID: \"f209060d-bbc4-4f07-82e2-1e8b212c1f56\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-ztdwg" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.548341 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/c44b98ae-cd26-48e0-9c72-725fd64d22f9-node-pullsecrets\") pod \"apiserver-76f77b778f-d4vh5\" (UID: \"c44b98ae-cd26-48e0-9c72-725fd64d22f9\") " pod="openshift-apiserver/apiserver-76f77b778f-d4vh5" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.551057 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/62b979b0-adbc-4c29-a08d-55bb2d07fc6c-v4-0-config-system-cliconfig\") pod \"oauth-openshift-558db77b4-g6m6g\" (UID: \"62b979b0-adbc-4c29-a08d-55bb2d07fc6c\") " pod="openshift-authentication/oauth-openshift-558db77b4-g6m6g" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.552611 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/62b979b0-adbc-4c29-a08d-55bb2d07fc6c-v4-0-config-system-service-ca\") pod \"oauth-openshift-558db77b4-g6m6g\" (UID: \"62b979b0-adbc-4c29-a08d-55bb2d07fc6c\") " pod="openshift-authentication/oauth-openshift-558db77b4-g6m6g" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.555431 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/831547cf-3937-4d20-83b8-9570e309f0b3-serving-cert\") pod \"apiserver-7bbb656c7d-7zp2q\" (UID: \"831547cf-3937-4d20-83b8-9570e309f0b3\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-7zp2q" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.557021 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/62b979b0-adbc-4c29-a08d-55bb2d07fc6c-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-558db77b4-g6m6g\" (UID: \"62b979b0-adbc-4c29-a08d-55bb2d07fc6c\") " pod="openshift-authentication/oauth-openshift-558db77b4-g6m6g" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.557119 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/831547cf-3937-4d20-83b8-9570e309f0b3-etcd-client\") pod \"apiserver-7bbb656c7d-7zp2q\" (UID: \"831547cf-3937-4d20-83b8-9570e309f0b3\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-7zp2q" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.557666 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/62b979b0-adbc-4c29-a08d-55bb2d07fc6c-v4-0-config-system-session\") pod \"oauth-openshift-558db77b4-g6m6g\" (UID: \"62b979b0-adbc-4c29-a08d-55bb2d07fc6c\") " pod="openshift-authentication/oauth-openshift-558db77b4-g6m6g" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.557741 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/62b979b0-adbc-4c29-a08d-55bb2d07fc6c-v4-0-config-user-template-login\") pod \"oauth-openshift-558db77b4-g6m6g\" (UID: \"62b979b0-adbc-4c29-a08d-55bb2d07fc6c\") " 
pod="openshift-authentication/oauth-openshift-558db77b4-g6m6g" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.557831 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/0dc0778b-7907-42c7-a656-22c517a50c3a-service-ca-bundle\") pod \"authentication-operator-69f744f599-wvt6w\" (UID: \"0dc0778b-7907-42c7-a656-22c517a50c3a\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-wvt6w" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.558849 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/fbac19dc-113c-44e6-8744-445e62ea540d-client-ca\") pod \"controller-manager-879f6c89f-c56h5\" (UID: \"fbac19dc-113c-44e6-8744-445e62ea540d\") " pod="openshift-controller-manager/controller-manager-879f6c89f-c56h5" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.558862 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/ae977eb4-8273-4dab-9e39-80c36ccd63e2-console-serving-cert\") pod \"console-f9d7485db-nsj2w\" (UID: \"ae977eb4-8273-4dab-9e39-80c36ccd63e2\") " pod="openshift-console/console-f9d7485db-nsj2w" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.559371 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c14868fd-4ccf-4779-81e8-dc3b30393f1f-serving-cert\") pod \"openshift-config-operator-7777fb866f-954dx\" (UID: \"c14868fd-4ccf-4779-81e8-dc3b30393f1f\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-954dx" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.559951 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/fbac19dc-113c-44e6-8744-445e62ea540d-config\") pod \"controller-manager-879f6c89f-c56h5\" (UID: \"fbac19dc-113c-44e6-8744-445e62ea540d\") " pod="openshift-controller-manager/controller-manager-879f6c89f-c56h5" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.560066 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/ce9a30da-be0f-49f1-8e0b-40bb1fba706a-client-ca\") pod \"route-controller-manager-6576b87f9c-pdkqk\" (UID: \"ce9a30da-be0f-49f1-8e0b-40bb1fba706a\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-pdkqk" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.560284 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/0dc0778b-7907-42c7-a656-22c517a50c3a-trusted-ca-bundle\") pod \"authentication-operator-69f744f599-wvt6w\" (UID: \"0dc0778b-7907-42c7-a656-22c517a50c3a\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-wvt6w" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.560639 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/831547cf-3937-4d20-83b8-9570e309f0b3-etcd-serving-ca\") pod \"apiserver-7bbb656c7d-7zp2q\" (UID: \"831547cf-3937-4d20-83b8-9570e309f0b3\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-7zp2q" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.560927 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: 
\"kubernetes.io/secret/fbac19dc-113c-44e6-8744-445e62ea540d-serving-cert\") pod \"controller-manager-879f6c89f-c56h5\" (UID: \"fbac19dc-113c-44e6-8744-445e62ea540d\") " pod="openshift-controller-manager/controller-manager-879f6c89f-c56h5" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.561406 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/62b979b0-adbc-4c29-a08d-55bb2d07fc6c-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-558db77b4-g6m6g\" (UID: \"62b979b0-adbc-4c29-a08d-55bb2d07fc6c\") " pod="openshift-authentication/oauth-openshift-558db77b4-g6m6g" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.561480 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/c44b98ae-cd26-48e0-9c72-725fd64d22f9-etcd-client\") pod \"apiserver-76f77b778f-d4vh5\" (UID: \"c44b98ae-cd26-48e0-9c72-725fd64d22f9\") " pod="openshift-apiserver/apiserver-76f77b778f-d4vh5" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.561578 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/f209060d-bbc4-4f07-82e2-1e8b212c1f56-machine-approver-tls\") pod \"machine-approver-56656f9798-ztdwg\" (UID: \"f209060d-bbc4-4f07-82e2-1e8b212c1f56\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-ztdwg" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.562036 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-27btn"] Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.562378 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/c44b98ae-cd26-48e0-9c72-725fd64d22f9-image-import-ca\") pod \"apiserver-76f77b778f-d4vh5\" (UID: \"c44b98ae-cd26-48e0-9c72-725fd64d22f9\") " pod="openshift-apiserver/apiserver-76f77b778f-d4vh5" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.562865 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mcc-proxy-tls" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.563555 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/62b979b0-adbc-4c29-a08d-55bb2d07fc6c-v4-0-config-system-router-certs\") pod \"oauth-openshift-558db77b4-g6m6g\" (UID: \"62b979b0-adbc-4c29-a08d-55bb2d07fc6c\") " pod="openshift-authentication/oauth-openshift-558db77b4-g6m6g" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.563554 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-dl4z4"] Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.563907 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/ae977eb4-8273-4dab-9e39-80c36ccd63e2-console-config\") pod \"console-f9d7485db-nsj2w\" (UID: \"ae977eb4-8273-4dab-9e39-80c36ccd63e2\") " pod="openshift-console/console-f9d7485db-nsj2w" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.564844 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/multus-admission-controller-857f4d67dd-js7td"] Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.565385 4926 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0dc0778b-7907-42c7-a656-22c517a50c3a-serving-cert\") pod \"authentication-operator-69f744f599-wvt6w\" (UID: \"0dc0778b-7907-42c7-a656-22c517a50c3a\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-wvt6w" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.567470 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-brhnh"] Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.568173 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ce9a30da-be0f-49f1-8e0b-40bb1fba706a-serving-cert\") pod \"route-controller-manager-6576b87f9c-pdkqk\" (UID: \"ce9a30da-be0f-49f1-8e0b-40bb1fba706a\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-pdkqk" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.570860 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-f9d7485db-nsj2w"] Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.572166 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/downloads-7954f5f757-8l5sv"] Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.574926 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-cm8rb"] Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.576582 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-rkbd7"] Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.578681 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-server-qhj5l"] Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.579279 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-server-qhj5l" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.580003 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-dns/dns-default-mwqpv"] Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.580751 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-dns/dns-default-mwqpv" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.581969 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-etcd-operator/etcd-operator-b45778765-9ph96"] Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.582681 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serviceaccount-dockercfg-rq7zk" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.584342 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator/migrator-59844c95c7-qkq57"] Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.585640 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-rwrgv"] Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.587002 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca/service-ca-9c57cc56f-2l9hr"] Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.588443 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-operator/ingress-operator-5b745b69d9-wlvg2"] Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.589573 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-h55df"] Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.591499 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-kkqbv"] Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.592836 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-j5lpl"] Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.593931 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca-operator/service-ca-operator-777779d784-dcbsr"] Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.595529 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-canary/ingress-canary-k24qp"] Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.597047 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns/dns-default-mwqpv"] Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.598327 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-rndxt"] Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.599736 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-5mlfw"] Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.601037 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29396790-vw7kn"] Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.601746 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serving-cert" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.602962 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/catalog-operator-68c6474976-zmfpm"] Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.603976 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-operator-74547568cd-pzqp7"] Nov 22 10:42:03 crc 
kubenswrapper[4926]: I1122 10:42:03.605703 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-wlnj9"] Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.606537 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["hostpath-provisioner/csi-hostpathplugin-p5j7r"] Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.621859 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"pprof-cert" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.640972 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9464da92-3307-4d40-8643-133ecf84d523-serving-cert\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-5mlfw\" (UID: \"9464da92-3307-4d40-8643-133ecf84d523\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-5mlfw" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.641036 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/3711dcd6-7ec3-4610-857d-e24f38c6e986-apiservice-cert\") pod \"packageserver-d55dfcdfc-rwrgv\" (UID: \"3711dcd6-7ec3-4610-857d-e24f38c6e986\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-rwrgv" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.641081 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8978x\" (UniqueName: \"kubernetes.io/projected/40a4057a-5a42-463f-aeb2-995754abca81-kube-api-access-8978x\") pod \"multus-admission-controller-857f4d67dd-js7td\" (UID: \"40a4057a-5a42-463f-aeb2-995754abca81\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-js7td" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.641201 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6gmbh\" (UniqueName: \"kubernetes.io/projected/80a51b2f-5a67-4cb4-ab41-09516cec7e4d-kube-api-access-6gmbh\") pod \"openshift-apiserver-operator-796bbdcf4f-rndxt\" (UID: \"80a51b2f-5a67-4cb4-ab41-09516cec7e4d\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-rndxt" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.641226 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/3711dcd6-7ec3-4610-857d-e24f38c6e986-webhook-cert\") pod \"packageserver-d55dfcdfc-rwrgv\" (UID: \"3711dcd6-7ec3-4610-857d-e24f38c6e986\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-rwrgv" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.641243 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/261fec63-db4b-4580-a128-3cc51da2cc93-profile-collector-cert\") pod \"olm-operator-6b444d44fb-brhnh\" (UID: \"261fec63-db4b-4580-a128-3cc51da2cc93\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-brhnh" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.641268 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/3e81d5b7-f92f-4de5-bb55-512620175698-etcd-service-ca\") pod \"etcd-operator-b45778765-9ph96\" (UID: \"3e81d5b7-f92f-4de5-bb55-512620175698\") " pod="openshift-etcd-operator/etcd-operator-b45778765-9ph96" Nov 22 10:42:03 crc 
kubenswrapper[4926]: I1122 10:42:03.641297 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/731bcd4c-12b4-408c-a30b-dd7ccd6a0712-metrics-tls\") pod \"dns-operator-744455d44c-jb7x7\" (UID: \"731bcd4c-12b4-408c-a30b-dd7ccd6a0712\") " pod="openshift-dns-operator/dns-operator-744455d44c-jb7x7" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.641311 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s76s5\" (UniqueName: \"kubernetes.io/projected/261fec63-db4b-4580-a128-3cc51da2cc93-kube-api-access-s76s5\") pod \"olm-operator-6b444d44fb-brhnh\" (UID: \"261fec63-db4b-4580-a128-3cc51da2cc93\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-brhnh" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.641333 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7w54x\" (UniqueName: \"kubernetes.io/projected/3711dcd6-7ec3-4610-857d-e24f38c6e986-kube-api-access-7w54x\") pod \"packageserver-d55dfcdfc-rwrgv\" (UID: \"3711dcd6-7ec3-4610-857d-e24f38c6e986\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-rwrgv" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.641353 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/40a4057a-5a42-463f-aeb2-995754abca81-webhook-certs\") pod \"multus-admission-controller-857f4d67dd-js7td\" (UID: \"40a4057a-5a42-463f-aeb2-995754abca81\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-js7td" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.641376 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bw2sr\" (UniqueName: \"kubernetes.io/projected/731bcd4c-12b4-408c-a30b-dd7ccd6a0712-kube-api-access-bw2sr\") pod \"dns-operator-744455d44c-jb7x7\" (UID: \"731bcd4c-12b4-408c-a30b-dd7ccd6a0712\") " pod="openshift-dns-operator/dns-operator-744455d44c-jb7x7" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.641396 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/3e81d5b7-f92f-4de5-bb55-512620175698-etcd-client\") pod \"etcd-operator-b45778765-9ph96\" (UID: \"3e81d5b7-f92f-4de5-bb55-512620175698\") " pod="openshift-etcd-operator/etcd-operator-b45778765-9ph96" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.641417 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/32923a07-7dfd-47a6-9b84-6cf7ebd329fa-signing-key\") pod \"service-ca-9c57cc56f-2l9hr\" (UID: \"32923a07-7dfd-47a6-9b84-6cf7ebd329fa\") " pod="openshift-service-ca/service-ca-9c57cc56f-2l9hr" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.641442 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-thzbk\" (UniqueName: \"kubernetes.io/projected/e0882887-a6d9-4aac-a7d7-c14b934298e2-kube-api-access-thzbk\") pod \"machine-api-operator-5694c8668f-jkqsx\" (UID: \"e0882887-a6d9-4aac-a7d7-c14b934298e2\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-jkqsx" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.641461 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/e0ad330e-054d-4f9f-89e1-2e18fca1e66a-trusted-ca\") pod 
\"ingress-operator-5b745b69d9-wlvg2\" (UID: \"e0ad330e-054d-4f9f-89e1-2e18fca1e66a\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-wlvg2" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.641477 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-46497\" (UniqueName: \"kubernetes.io/projected/32923a07-7dfd-47a6-9b84-6cf7ebd329fa-kube-api-access-46497\") pod \"service-ca-9c57cc56f-2l9hr\" (UID: \"32923a07-7dfd-47a6-9b84-6cf7ebd329fa\") " pod="openshift-service-ca/service-ca-9c57cc56f-2l9hr" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.641494 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/e0882887-a6d9-4aac-a7d7-c14b934298e2-images\") pod \"machine-api-operator-5694c8668f-jkqsx\" (UID: \"e0882887-a6d9-4aac-a7d7-c14b934298e2\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-jkqsx" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.641509 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/3e81d5b7-f92f-4de5-bb55-512620175698-etcd-ca\") pod \"etcd-operator-b45778765-9ph96\" (UID: \"3e81d5b7-f92f-4de5-bb55-512620175698\") " pod="openshift-etcd-operator/etcd-operator-b45778765-9ph96" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.641526 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3e81d5b7-f92f-4de5-bb55-512620175698-config\") pod \"etcd-operator-b45778765-9ph96\" (UID: \"3e81d5b7-f92f-4de5-bb55-512620175698\") " pod="openshift-etcd-operator/etcd-operator-b45778765-9ph96" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.641542 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/32923a07-7dfd-47a6-9b84-6cf7ebd329fa-signing-cabundle\") pod \"service-ca-9c57cc56f-2l9hr\" (UID: \"32923a07-7dfd-47a6-9b84-6cf7ebd329fa\") " pod="openshift-service-ca/service-ca-9c57cc56f-2l9hr" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.641559 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e7bb5a83-ce79-42db-a6d0-996b1405d668-serving-cert\") pod \"openshift-controller-manager-operator-756b6f6bc6-dl4z4\" (UID: \"e7bb5a83-ce79-42db-a6d0-996b1405d668\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-dl4z4" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.641577 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9464da92-3307-4d40-8643-133ecf84d523-config\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-5mlfw\" (UID: \"9464da92-3307-4d40-8643-133ecf84d523\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-5mlfw" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.641608 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nnxzd\" (UniqueName: \"kubernetes.io/projected/3e81d5b7-f92f-4de5-bb55-512620175698-kube-api-access-nnxzd\") pod \"etcd-operator-b45778765-9ph96\" (UID: \"3e81d5b7-f92f-4de5-bb55-512620175698\") " pod="openshift-etcd-operator/etcd-operator-b45778765-9ph96" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.641635 4926 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/05be3235-93e8-4d5c-8f34-9c47f694bb1b-proxy-tls\") pod \"machine-config-operator-74547568cd-pzqp7\" (UID: \"05be3235-93e8-4d5c-8f34-9c47f694bb1b\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-pzqp7" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.641655 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/261fec63-db4b-4580-a128-3cc51da2cc93-srv-cert\") pod \"olm-operator-6b444d44fb-brhnh\" (UID: \"261fec63-db4b-4580-a128-3cc51da2cc93\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-brhnh" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.641672 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f9451a32-8743-493b-87a2-e7473354f0a4-config\") pod \"service-ca-operator-777779d784-dcbsr\" (UID: \"f9451a32-8743-493b-87a2-e7473354f0a4\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-dcbsr" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.641686 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/e0ad330e-054d-4f9f-89e1-2e18fca1e66a-metrics-tls\") pod \"ingress-operator-5b745b69d9-wlvg2\" (UID: \"e0ad330e-054d-4f9f-89e1-2e18fca1e66a\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-wlvg2" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.641718 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/05be3235-93e8-4d5c-8f34-9c47f694bb1b-images\") pod \"machine-config-operator-74547568cd-pzqp7\" (UID: \"05be3235-93e8-4d5c-8f34-9c47f694bb1b\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-pzqp7" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.641734 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/3e81d5b7-f92f-4de5-bb55-512620175698-serving-cert\") pod \"etcd-operator-b45778765-9ph96\" (UID: \"3e81d5b7-f92f-4de5-bb55-512620175698\") " pod="openshift-etcd-operator/etcd-operator-b45778765-9ph96" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.641747 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f9451a32-8743-493b-87a2-e7473354f0a4-serving-cert\") pod \"service-ca-operator-777779d784-dcbsr\" (UID: \"f9451a32-8743-493b-87a2-e7473354f0a4\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-dcbsr" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.641762 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e7bb5a83-ce79-42db-a6d0-996b1405d668-config\") pod \"openshift-controller-manager-operator-756b6f6bc6-dl4z4\" (UID: \"e7bb5a83-ce79-42db-a6d0-996b1405d668\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-dl4z4" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.641776 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/9464da92-3307-4d40-8643-133ecf84d523-kube-api-access\") pod 
\"openshift-kube-scheduler-operator-5fdd9b5758-5mlfw\" (UID: \"9464da92-3307-4d40-8643-133ecf84d523\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-5mlfw" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.641806 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/80a51b2f-5a67-4cb4-ab41-09516cec7e4d-config\") pod \"openshift-apiserver-operator-796bbdcf4f-rndxt\" (UID: \"80a51b2f-5a67-4cb4-ab41-09516cec7e4d\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-rndxt" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.641821 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/e0882887-a6d9-4aac-a7d7-c14b934298e2-machine-api-operator-tls\") pod \"machine-api-operator-5694c8668f-jkqsx\" (UID: \"e0882887-a6d9-4aac-a7d7-c14b934298e2\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-jkqsx" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.641835 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pp7tn\" (UniqueName: \"kubernetes.io/projected/e0ad330e-054d-4f9f-89e1-2e18fca1e66a-kube-api-access-pp7tn\") pod \"ingress-operator-5b745b69d9-wlvg2\" (UID: \"e0ad330e-054d-4f9f-89e1-2e18fca1e66a\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-wlvg2" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.641856 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/05be3235-93e8-4d5c-8f34-9c47f694bb1b-auth-proxy-config\") pod \"machine-config-operator-74547568cd-pzqp7\" (UID: \"05be3235-93e8-4d5c-8f34-9c47f694bb1b\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-pzqp7" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.641878 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/e0ad330e-054d-4f9f-89e1-2e18fca1e66a-bound-sa-token\") pod \"ingress-operator-5b745b69d9-wlvg2\" (UID: \"e0ad330e-054d-4f9f-89e1-2e18fca1e66a\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-wlvg2" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.641931 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-prkxl\" (UniqueName: \"kubernetes.io/projected/e7bb5a83-ce79-42db-a6d0-996b1405d668-kube-api-access-prkxl\") pod \"openshift-controller-manager-operator-756b6f6bc6-dl4z4\" (UID: \"e7bb5a83-ce79-42db-a6d0-996b1405d668\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-dl4z4" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.641954 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e0882887-a6d9-4aac-a7d7-c14b934298e2-config\") pod \"machine-api-operator-5694c8668f-jkqsx\" (UID: \"e0882887-a6d9-4aac-a7d7-c14b934298e2\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-jkqsx" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.641970 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/80a51b2f-5a67-4cb4-ab41-09516cec7e4d-serving-cert\") pod \"openshift-apiserver-operator-796bbdcf4f-rndxt\" (UID: 
\"80a51b2f-5a67-4cb4-ab41-09516cec7e4d\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-rndxt" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.641988 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gmr89\" (UniqueName: \"kubernetes.io/projected/f9451a32-8743-493b-87a2-e7473354f0a4-kube-api-access-gmr89\") pod \"service-ca-operator-777779d784-dcbsr\" (UID: \"f9451a32-8743-493b-87a2-e7473354f0a4\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-dcbsr" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.642010 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-p9wb7\" (UniqueName: \"kubernetes.io/projected/05be3235-93e8-4d5c-8f34-9c47f694bb1b-kube-api-access-p9wb7\") pod \"machine-config-operator-74547568cd-pzqp7\" (UID: \"05be3235-93e8-4d5c-8f34-9c47f694bb1b\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-pzqp7" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.642026 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/3711dcd6-7ec3-4610-857d-e24f38c6e986-tmpfs\") pod \"packageserver-d55dfcdfc-rwrgv\" (UID: \"3711dcd6-7ec3-4610-857d-e24f38c6e986\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-rwrgv" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.642706 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"kube-root-ca.crt" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.642862 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/3711dcd6-7ec3-4610-857d-e24f38c6e986-tmpfs\") pod \"packageserver-d55dfcdfc-rwrgv\" (UID: \"3711dcd6-7ec3-4610-857d-e24f38c6e986\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-rwrgv" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.644162 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e7bb5a83-ce79-42db-a6d0-996b1405d668-config\") pod \"openshift-controller-manager-operator-756b6f6bc6-dl4z4\" (UID: \"e7bb5a83-ce79-42db-a6d0-996b1405d668\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-dl4z4" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.644271 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"images\" (UniqueName: \"kubernetes.io/configmap/e0882887-a6d9-4aac-a7d7-c14b934298e2-images\") pod \"machine-api-operator-5694c8668f-jkqsx\" (UID: \"e0882887-a6d9-4aac-a7d7-c14b934298e2\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-jkqsx" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.644301 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/05be3235-93e8-4d5c-8f34-9c47f694bb1b-auth-proxy-config\") pod \"machine-config-operator-74547568cd-pzqp7\" (UID: \"05be3235-93e8-4d5c-8f34-9c47f694bb1b\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-pzqp7" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.644998 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e0882887-a6d9-4aac-a7d7-c14b934298e2-config\") pod \"machine-api-operator-5694c8668f-jkqsx\" 
(UID: \"e0882887-a6d9-4aac-a7d7-c14b934298e2\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-jkqsx" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.645617 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/261fec63-db4b-4580-a128-3cc51da2cc93-profile-collector-cert\") pod \"olm-operator-6b444d44fb-brhnh\" (UID: \"261fec63-db4b-4580-a128-3cc51da2cc93\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-brhnh" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.646184 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/731bcd4c-12b4-408c-a30b-dd7ccd6a0712-metrics-tls\") pod \"dns-operator-744455d44c-jb7x7\" (UID: \"731bcd4c-12b4-408c-a30b-dd7ccd6a0712\") " pod="openshift-dns-operator/dns-operator-744455d44c-jb7x7" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.647453 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/e0882887-a6d9-4aac-a7d7-c14b934298e2-machine-api-operator-tls\") pod \"machine-api-operator-5694c8668f-jkqsx\" (UID: \"e0882887-a6d9-4aac-a7d7-c14b934298e2\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-jkqsx" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.647614 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/261fec63-db4b-4580-a128-3cc51da2cc93-srv-cert\") pod \"olm-operator-6b444d44fb-brhnh\" (UID: \"261fec63-db4b-4580-a128-3cc51da2cc93\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-brhnh" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.647758 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e7bb5a83-ce79-42db-a6d0-996b1405d668-serving-cert\") pod \"openshift-controller-manager-operator-756b6f6bc6-dl4z4\" (UID: \"e7bb5a83-ce79-42db-a6d0-996b1405d668\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-dl4z4" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.662743 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-controller-dockercfg-c2lfx" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.702013 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-dockercfg-k9rxt" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.721991 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-tls" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.741766 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"kube-root-ca.crt" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.762474 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-admission-controller-secret" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.766400 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/40a4057a-5a42-463f-aeb2-995754abca81-webhook-certs\") pod \"multus-admission-controller-857f4d67dd-js7td\" (UID: \"40a4057a-5a42-463f-aeb2-995754abca81\") " 
pod="openshift-multus/multus-admission-controller-857f4d67dd-js7td" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.782062 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator"/"kube-storage-version-migrator-sa-dockercfg-5xfcg" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.801571 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"openshift-service-ca.crt" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.821902 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ac-dockercfg-9lkdf" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.841851 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"openshift-service-ca.crt" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.862793 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"metrics-tls" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.870007 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/e0ad330e-054d-4f9f-89e1-2e18fca1e66a-metrics-tls\") pod \"ingress-operator-5b745b69d9-wlvg2\" (UID: \"e0ad330e-054d-4f9f-89e1-2e18fca1e66a\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-wlvg2" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.882756 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"ingress-operator-dockercfg-7lnqk" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.907897 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"trusted-ca" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.915552 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/e0ad330e-054d-4f9f-89e1-2e18fca1e66a-trusted-ca\") pod \"ingress-operator-5b745b69d9-wlvg2\" (UID: \"e0ad330e-054d-4f9f-89e1-2e18fca1e66a\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-wlvg2" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.921968 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"kube-root-ca.crt" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.942333 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-dockercfg-5nsgg" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.961694 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-metrics" Nov 22 10:42:03 crc kubenswrapper[4926]: I1122 10:42:03.990314 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"marketplace-trusted-ca" Nov 22 10:42:04 crc kubenswrapper[4926]: I1122 10:42:04.003126 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"kube-root-ca.crt" Nov 22 10:42:04 crc kubenswrapper[4926]: I1122 10:42:04.022390 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"openshift-service-ca.crt" Nov 22 10:42:04 crc kubenswrapper[4926]: I1122 10:42:04.042500 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-service-ca.crt" Nov 22 10:42:04 crc kubenswrapper[4926]: I1122 10:42:04.062762 
4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-config" Nov 22 10:42:04 crc kubenswrapper[4926]: I1122 10:42:04.064770 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/80a51b2f-5a67-4cb4-ab41-09516cec7e4d-config\") pod \"openshift-apiserver-operator-796bbdcf4f-rndxt\" (UID: \"80a51b2f-5a67-4cb4-ab41-09516cec7e4d\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-rndxt" Nov 22 10:42:04 crc kubenswrapper[4926]: I1122 10:42:04.082202 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-dockercfg-xtcjv" Nov 22 10:42:04 crc kubenswrapper[4926]: I1122 10:42:04.102854 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-serving-cert" Nov 22 10:42:04 crc kubenswrapper[4926]: I1122 10:42:04.106384 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/80a51b2f-5a67-4cb4-ab41-09516cec7e4d-serving-cert\") pod \"openshift-apiserver-operator-796bbdcf4f-rndxt\" (UID: \"80a51b2f-5a67-4cb4-ab41-09516cec7e4d\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-rndxt" Nov 22 10:42:04 crc kubenswrapper[4926]: I1122 10:42:04.122496 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"kube-root-ca.crt" Nov 22 10:42:04 crc kubenswrapper[4926]: I1122 10:42:04.142873 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"openshift-service-ca.crt" Nov 22 10:42:04 crc kubenswrapper[4926]: I1122 10:42:04.163323 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"service-ca-operator-dockercfg-rg9jl" Nov 22 10:42:04 crc kubenswrapper[4926]: I1122 10:42:04.182938 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"serving-cert" Nov 22 10:42:04 crc kubenswrapper[4926]: I1122 10:42:04.186437 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f9451a32-8743-493b-87a2-e7473354f0a4-serving-cert\") pod \"service-ca-operator-777779d784-dcbsr\" (UID: \"f9451a32-8743-493b-87a2-e7473354f0a4\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-dcbsr" Nov 22 10:42:04 crc kubenswrapper[4926]: I1122 10:42:04.203159 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"service-ca-operator-config" Nov 22 10:42:04 crc kubenswrapper[4926]: I1122 10:42:04.205079 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f9451a32-8743-493b-87a2-e7473354f0a4-config\") pod \"service-ca-operator-777779d784-dcbsr\" (UID: \"f9451a32-8743-493b-87a2-e7473354f0a4\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-dcbsr" Nov 22 10:42:04 crc kubenswrapper[4926]: I1122 10:42:04.223081 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-service-ca-bundle" Nov 22 10:42:04 crc kubenswrapper[4926]: I1122 10:42:04.233091 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-service-ca\" (UniqueName: 
\"kubernetes.io/configmap/3e81d5b7-f92f-4de5-bb55-512620175698-etcd-service-ca\") pod \"etcd-operator-b45778765-9ph96\" (UID: \"3e81d5b7-f92f-4de5-bb55-512620175698\") " pod="openshift-etcd-operator/etcd-operator-b45778765-9ph96" Nov 22 10:42:04 crc kubenswrapper[4926]: I1122 10:42:04.243084 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-dockercfg-r9srn" Nov 22 10:42:04 crc kubenswrapper[4926]: I1122 10:42:04.262956 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-serving-cert" Nov 22 10:42:04 crc kubenswrapper[4926]: I1122 10:42:04.267244 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/3e81d5b7-f92f-4de5-bb55-512620175698-serving-cert\") pod \"etcd-operator-b45778765-9ph96\" (UID: \"3e81d5b7-f92f-4de5-bb55-512620175698\") " pod="openshift-etcd-operator/etcd-operator-b45778765-9ph96" Nov 22 10:42:04 crc kubenswrapper[4926]: I1122 10:42:04.282536 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-client" Nov 22 10:42:04 crc kubenswrapper[4926]: I1122 10:42:04.286297 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/3e81d5b7-f92f-4de5-bb55-512620175698-etcd-client\") pod \"etcd-operator-b45778765-9ph96\" (UID: \"3e81d5b7-f92f-4de5-bb55-512620175698\") " pod="openshift-etcd-operator/etcd-operator-b45778765-9ph96" Nov 22 10:42:04 crc kubenswrapper[4926]: I1122 10:42:04.301630 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"kube-root-ca.crt" Nov 22 10:42:04 crc kubenswrapper[4926]: I1122 10:42:04.323226 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"openshift-service-ca.crt" Nov 22 10:42:04 crc kubenswrapper[4926]: I1122 10:42:04.343198 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-operator-config" Nov 22 10:42:04 crc kubenswrapper[4926]: I1122 10:42:04.345004 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3e81d5b7-f92f-4de5-bb55-512620175698-config\") pod \"etcd-operator-b45778765-9ph96\" (UID: \"3e81d5b7-f92f-4de5-bb55-512620175698\") " pod="openshift-etcd-operator/etcd-operator-b45778765-9ph96" Nov 22 10:42:04 crc kubenswrapper[4926]: I1122 10:42:04.362468 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-ca-bundle" Nov 22 10:42:04 crc kubenswrapper[4926]: I1122 10:42:04.365519 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/3e81d5b7-f92f-4de5-bb55-512620175698-etcd-ca\") pod \"etcd-operator-b45778765-9ph96\" (UID: \"3e81d5b7-f92f-4de5-bb55-512620175698\") " pod="openshift-etcd-operator/etcd-operator-b45778765-9ph96" Nov 22 10:42:04 crc kubenswrapper[4926]: I1122 10:42:04.383117 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"machine-config-operator-images" Nov 22 10:42:04 crc kubenswrapper[4926]: I1122 10:42:04.384858 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"images\" (UniqueName: \"kubernetes.io/configmap/05be3235-93e8-4d5c-8f34-9c47f694bb1b-images\") pod \"machine-config-operator-74547568cd-pzqp7\" (UID: 
\"05be3235-93e8-4d5c-8f34-9c47f694bb1b\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-pzqp7" Nov 22 10:42:04 crc kubenswrapper[4926]: I1122 10:42:04.402775 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-operator-dockercfg-98p87" Nov 22 10:42:04 crc kubenswrapper[4926]: I1122 10:42:04.421520 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mco-proxy-tls" Nov 22 10:42:04 crc kubenswrapper[4926]: I1122 10:42:04.427337 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/05be3235-93e8-4d5c-8f34-9c47f694bb1b-proxy-tls\") pod \"machine-config-operator-74547568cd-pzqp7\" (UID: \"05be3235-93e8-4d5c-8f34-9c47f694bb1b\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-pzqp7" Nov 22 10:42:04 crc kubenswrapper[4926]: I1122 10:42:04.443190 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"packageserver-service-cert" Nov 22 10:42:04 crc kubenswrapper[4926]: I1122 10:42:04.459549 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/3711dcd6-7ec3-4610-857d-e24f38c6e986-webhook-cert\") pod \"packageserver-d55dfcdfc-rwrgv\" (UID: \"3711dcd6-7ec3-4610-857d-e24f38c6e986\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-rwrgv" Nov 22 10:42:04 crc kubenswrapper[4926]: I1122 10:42:04.459916 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/3711dcd6-7ec3-4610-857d-e24f38c6e986-apiservice-cert\") pod \"packageserver-d55dfcdfc-rwrgv\" (UID: \"3711dcd6-7ec3-4610-857d-e24f38c6e986\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-rwrgv" Nov 22 10:42:04 crc kubenswrapper[4926]: I1122 10:42:04.461227 4926 request.go:700] Waited for 1.004076305s due to client-side throttling, not priority and fairness, request: GET:https://api-int.crc.testing:6443/api/v1/namespaces/openshift-image-registry/configmaps?fieldSelector=metadata.name%3Dtrusted-ca&limit=500&resourceVersion=0 Nov 22 10:42:04 crc kubenswrapper[4926]: I1122 10:42:04.471338 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"trusted-ca" Nov 22 10:42:04 crc kubenswrapper[4926]: I1122 10:42:04.482703 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"cluster-image-registry-operator-dockercfg-m4qtx" Nov 22 10:42:04 crc kubenswrapper[4926]: I1122 10:42:04.502798 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-operator-tls" Nov 22 10:42:04 crc kubenswrapper[4926]: I1122 10:42:04.523538 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"kube-root-ca.crt" Nov 22 10:42:04 crc kubenswrapper[4926]: I1122 10:42:04.543700 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-root-ca.crt" Nov 22 10:42:04 crc kubenswrapper[4926]: I1122 10:42:04.563119 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-dockercfg-gkqpw" Nov 22 10:42:04 crc kubenswrapper[4926]: I1122 10:42:04.582344 4926 reflector.go:368] Caches populated 
for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-serving-cert" Nov 22 10:42:04 crc kubenswrapper[4926]: I1122 10:42:04.602710 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-config" Nov 22 10:42:04 crc kubenswrapper[4926]: I1122 10:42:04.623146 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"catalog-operator-serving-cert" Nov 22 10:42:04 crc kubenswrapper[4926]: E1122 10:42:04.641649 4926 secret.go:188] Couldn't get secret openshift-kube-scheduler-operator/kube-scheduler-operator-serving-cert: failed to sync secret cache: timed out waiting for the condition Nov 22 10:42:04 crc kubenswrapper[4926]: E1122 10:42:04.642149 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/9464da92-3307-4d40-8643-133ecf84d523-serving-cert podName:9464da92-3307-4d40-8643-133ecf84d523 nodeName:}" failed. No retries permitted until 2025-11-22 10:42:05.142119191 +0000 UTC m=+145.443724508 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/9464da92-3307-4d40-8643-133ecf84d523-serving-cert") pod "openshift-kube-scheduler-operator-5fdd9b5758-5mlfw" (UID: "9464da92-3307-4d40-8643-133ecf84d523") : failed to sync secret cache: timed out waiting for the condition Nov 22 10:42:04 crc kubenswrapper[4926]: I1122 10:42:04.642382 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"registry-dockercfg-kzzsd" Nov 22 10:42:04 crc kubenswrapper[4926]: E1122 10:42:04.643369 4926 configmap.go:193] Couldn't get configMap openshift-service-ca/signing-cabundle: failed to sync configmap cache: timed out waiting for the condition Nov 22 10:42:04 crc kubenswrapper[4926]: E1122 10:42:04.643422 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/32923a07-7dfd-47a6-9b84-6cf7ebd329fa-signing-cabundle podName:32923a07-7dfd-47a6-9b84-6cf7ebd329fa nodeName:}" failed. No retries permitted until 2025-11-22 10:42:05.143405702 +0000 UTC m=+145.445010989 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "signing-cabundle" (UniqueName: "kubernetes.io/configmap/32923a07-7dfd-47a6-9b84-6cf7ebd329fa-signing-cabundle") pod "service-ca-9c57cc56f-2l9hr" (UID: "32923a07-7dfd-47a6-9b84-6cf7ebd329fa") : failed to sync configmap cache: timed out waiting for the condition Nov 22 10:42:04 crc kubenswrapper[4926]: E1122 10:42:04.643448 4926 configmap.go:193] Couldn't get configMap openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-config: failed to sync configmap cache: timed out waiting for the condition Nov 22 10:42:04 crc kubenswrapper[4926]: E1122 10:42:04.643468 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/9464da92-3307-4d40-8643-133ecf84d523-config podName:9464da92-3307-4d40-8643-133ecf84d523 nodeName:}" failed. No retries permitted until 2025-11-22 10:42:05.143462424 +0000 UTC m=+145.445067711 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/9464da92-3307-4d40-8643-133ecf84d523-config") pod "openshift-kube-scheduler-operator-5fdd9b5758-5mlfw" (UID: "9464da92-3307-4d40-8643-133ecf84d523") : failed to sync configmap cache: timed out waiting for the condition Nov 22 10:42:04 crc kubenswrapper[4926]: E1122 10:42:04.643844 4926 secret.go:188] Couldn't get secret openshift-service-ca/signing-key: failed to sync secret cache: timed out waiting for the condition Nov 22 10:42:04 crc kubenswrapper[4926]: E1122 10:42:04.644215 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/32923a07-7dfd-47a6-9b84-6cf7ebd329fa-signing-key podName:32923a07-7dfd-47a6-9b84-6cf7ebd329fa nodeName:}" failed. No retries permitted until 2025-11-22 10:42:05.144153061 +0000 UTC m=+145.445758378 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "signing-key" (UniqueName: "kubernetes.io/secret/32923a07-7dfd-47a6-9b84-6cf7ebd329fa-signing-key") pod "service-ca-9c57cc56f-2l9hr" (UID: "32923a07-7dfd-47a6-9b84-6cf7ebd329fa") : failed to sync secret cache: timed out waiting for the condition Nov 22 10:42:04 crc kubenswrapper[4926]: I1122 10:42:04.662084 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-tls" Nov 22 10:42:04 crc kubenswrapper[4926]: I1122 10:42:04.681630 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"installation-pull-secrets" Nov 22 10:42:04 crc kubenswrapper[4926]: I1122 10:42:04.702596 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"kube-root-ca.crt" Nov 22 10:42:04 crc kubenswrapper[4926]: I1122 10:42:04.723210 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"kube-scheduler-operator-serving-cert" Nov 22 10:42:04 crc kubenswrapper[4926]: I1122 10:42:04.741873 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-config" Nov 22 10:42:04 crc kubenswrapper[4926]: I1122 10:42:04.763100 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-dockercfg-qt55r" Nov 22 10:42:04 crc kubenswrapper[4926]: I1122 10:42:04.782510 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"openshift-service-ca.crt" Nov 22 10:42:04 crc kubenswrapper[4926]: I1122 10:42:04.802300 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"package-server-manager-serving-cert" Nov 22 10:42:04 crc kubenswrapper[4926]: I1122 10:42:04.823337 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-certs-default" Nov 22 10:42:04 crc kubenswrapper[4926]: I1122 10:42:04.843086 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-stats-default" Nov 22 10:42:04 crc kubenswrapper[4926]: I1122 10:42:04.864531 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-metrics-certs-default" Nov 22 10:42:04 crc kubenswrapper[4926]: I1122 10:42:04.883830 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-dockercfg-zdk86" Nov 22 10:42:04 crc kubenswrapper[4926]: I1122 10:42:04.904048 4926 reflector.go:368] Caches populated 
for *v1.ConfigMap from object-"openshift-ingress"/"service-ca-bundle" Nov 22 10:42:04 crc kubenswrapper[4926]: I1122 10:42:04.923281 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"kube-root-ca.crt" Nov 22 10:42:04 crc kubenswrapper[4926]: I1122 10:42:04.943405 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"openshift-service-ca.crt" Nov 22 10:42:04 crc kubenswrapper[4926]: I1122 10:42:04.963155 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"service-ca-dockercfg-pn86c" Nov 22 10:42:04 crc kubenswrapper[4926]: I1122 10:42:04.983260 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"signing-key" Nov 22 10:42:05 crc kubenswrapper[4926]: I1122 10:42:05.002520 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"signing-cabundle" Nov 22 10:42:05 crc kubenswrapper[4926]: I1122 10:42:05.023007 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"kube-root-ca.crt" Nov 22 10:42:05 crc kubenswrapper[4926]: I1122 10:42:05.062852 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 22 10:42:05 crc kubenswrapper[4926]: I1122 10:42:05.084038 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 22 10:42:05 crc kubenswrapper[4926]: I1122 10:42:05.101478 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"openshift-service-ca.crt" Nov 22 10:42:05 crc kubenswrapper[4926]: I1122 10:42:05.122858 4926 reflector.go:368] Caches populated for *v1.Secret from object-"hostpath-provisioner"/"csi-hostpath-provisioner-sa-dockercfg-qd74k" Nov 22 10:42:05 crc kubenswrapper[4926]: I1122 10:42:05.142919 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"kube-root-ca.crt" Nov 22 10:42:05 crc kubenswrapper[4926]: I1122 10:42:05.163718 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"openshift-service-ca.crt" Nov 22 10:42:05 crc kubenswrapper[4926]: I1122 10:42:05.168863 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9464da92-3307-4d40-8643-133ecf84d523-serving-cert\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-5mlfw\" (UID: \"9464da92-3307-4d40-8643-133ecf84d523\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-5mlfw" Nov 22 10:42:05 crc kubenswrapper[4926]: I1122 10:42:05.169136 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/32923a07-7dfd-47a6-9b84-6cf7ebd329fa-signing-key\") pod \"service-ca-9c57cc56f-2l9hr\" (UID: \"32923a07-7dfd-47a6-9b84-6cf7ebd329fa\") " pod="openshift-service-ca/service-ca-9c57cc56f-2l9hr" Nov 22 10:42:05 crc kubenswrapper[4926]: I1122 10:42:05.169466 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9464da92-3307-4d40-8643-133ecf84d523-config\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-5mlfw\" (UID: \"9464da92-3307-4d40-8643-133ecf84d523\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-5mlfw" Nov 22 
10:42:05 crc kubenswrapper[4926]: I1122 10:42:05.169545 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/32923a07-7dfd-47a6-9b84-6cf7ebd329fa-signing-cabundle\") pod \"service-ca-9c57cc56f-2l9hr\" (UID: \"32923a07-7dfd-47a6-9b84-6cf7ebd329fa\") " pod="openshift-service-ca/service-ca-9c57cc56f-2l9hr" Nov 22 10:42:05 crc kubenswrapper[4926]: I1122 10:42:05.171043 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9464da92-3307-4d40-8643-133ecf84d523-config\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-5mlfw\" (UID: \"9464da92-3307-4d40-8643-133ecf84d523\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-5mlfw" Nov 22 10:42:05 crc kubenswrapper[4926]: I1122 10:42:05.171443 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/32923a07-7dfd-47a6-9b84-6cf7ebd329fa-signing-cabundle\") pod \"service-ca-9c57cc56f-2l9hr\" (UID: \"32923a07-7dfd-47a6-9b84-6cf7ebd329fa\") " pod="openshift-service-ca/service-ca-9c57cc56f-2l9hr" Nov 22 10:42:05 crc kubenswrapper[4926]: I1122 10:42:05.175651 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9464da92-3307-4d40-8643-133ecf84d523-serving-cert\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-5mlfw\" (UID: \"9464da92-3307-4d40-8643-133ecf84d523\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-5mlfw" Nov 22 10:42:05 crc kubenswrapper[4926]: I1122 10:42:05.175857 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/32923a07-7dfd-47a6-9b84-6cf7ebd329fa-signing-key\") pod \"service-ca-9c57cc56f-2l9hr\" (UID: \"32923a07-7dfd-47a6-9b84-6cf7ebd329fa\") " pod="openshift-service-ca/service-ca-9c57cc56f-2l9hr" Nov 22 10:42:05 crc kubenswrapper[4926]: I1122 10:42:05.182811 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"default-dockercfg-2llfx" Nov 22 10:42:05 crc kubenswrapper[4926]: I1122 10:42:05.203125 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"canary-serving-cert" Nov 22 10:42:05 crc kubenswrapper[4926]: I1122 10:42:05.223252 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"kube-root-ca.crt" Nov 22 10:42:05 crc kubenswrapper[4926]: I1122 10:42:05.262685 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-x4s7j\" (UniqueName: \"kubernetes.io/projected/c14868fd-4ccf-4779-81e8-dc3b30393f1f-kube-api-access-x4s7j\") pod \"openshift-config-operator-7777fb866f-954dx\" (UID: \"c14868fd-4ccf-4779-81e8-dc3b30393f1f\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-954dx" Nov 22 10:42:05 crc kubenswrapper[4926]: I1122 10:42:05.284522 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jkjrs\" (UniqueName: \"kubernetes.io/projected/ae977eb4-8273-4dab-9e39-80c36ccd63e2-kube-api-access-jkjrs\") pod \"console-f9d7485db-nsj2w\" (UID: \"ae977eb4-8273-4dab-9e39-80c36ccd63e2\") " pod="openshift-console/console-f9d7485db-nsj2w" Nov 22 10:42:05 crc kubenswrapper[4926]: I1122 10:42:05.299304 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-w7wqx\" (UniqueName: \"kubernetes.io/projected/6e0f47bd-848d-4619-a2f9-eb503d04e2e0-kube-api-access-w7wqx\") pod \"downloads-7954f5f757-8l5sv\" (UID: \"6e0f47bd-848d-4619-a2f9-eb503d04e2e0\") " pod="openshift-console/downloads-7954f5f757-8l5sv" Nov 22 10:42:05 crc kubenswrapper[4926]: I1122 10:42:05.319144 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s2mfz\" (UniqueName: \"kubernetes.io/projected/0dc0778b-7907-42c7-a656-22c517a50c3a-kube-api-access-s2mfz\") pod \"authentication-operator-69f744f599-wvt6w\" (UID: \"0dc0778b-7907-42c7-a656-22c517a50c3a\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-wvt6w" Nov 22 10:42:05 crc kubenswrapper[4926]: I1122 10:42:05.344291 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-7777fb866f-954dx" Nov 22 10:42:05 crc kubenswrapper[4926]: I1122 10:42:05.350727 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-njc5s\" (UniqueName: \"kubernetes.io/projected/2d43d07f-3f14-4be9-9801-d40bda91eb2e-kube-api-access-njc5s\") pod \"console-operator-58897d9998-zk2x8\" (UID: \"2d43d07f-3f14-4be9-9801-d40bda91eb2e\") " pod="openshift-console-operator/console-operator-58897d9998-zk2x8" Nov 22 10:42:05 crc kubenswrapper[4926]: I1122 10:42:05.360935 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ch7n5\" (UniqueName: \"kubernetes.io/projected/ce9a30da-be0f-49f1-8e0b-40bb1fba706a-kube-api-access-ch7n5\") pod \"route-controller-manager-6576b87f9c-pdkqk\" (UID: \"ce9a30da-be0f-49f1-8e0b-40bb1fba706a\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-pdkqk" Nov 22 10:42:05 crc kubenswrapper[4926]: I1122 10:42:05.378689 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-f9d7485db-nsj2w" Nov 22 10:42:05 crc kubenswrapper[4926]: I1122 10:42:05.385815 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wk7sn\" (UniqueName: \"kubernetes.io/projected/dac98899-4c89-48b4-bf2d-0aee1eb95eff-kube-api-access-wk7sn\") pod \"cluster-samples-operator-665b6dd947-944rj\" (UID: \"dac98899-4c89-48b4-bf2d-0aee1eb95eff\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-944rj" Nov 22 10:42:05 crc kubenswrapper[4926]: I1122 10:42:05.399743 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-66l87\" (UniqueName: \"kubernetes.io/projected/62b979b0-adbc-4c29-a08d-55bb2d07fc6c-kube-api-access-66l87\") pod \"oauth-openshift-558db77b4-g6m6g\" (UID: \"62b979b0-adbc-4c29-a08d-55bb2d07fc6c\") " pod="openshift-authentication/oauth-openshift-558db77b4-g6m6g" Nov 22 10:42:05 crc kubenswrapper[4926]: I1122 10:42:05.423625 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-j696v\" (UniqueName: \"kubernetes.io/projected/fbac19dc-113c-44e6-8744-445e62ea540d-kube-api-access-j696v\") pod \"controller-manager-879f6c89f-c56h5\" (UID: \"fbac19dc-113c-44e6-8744-445e62ea540d\") " pod="openshift-controller-manager/controller-manager-879f6c89f-c56h5" Nov 22 10:42:05 crc kubenswrapper[4926]: I1122 10:42:05.440598 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-pdkqk" Nov 22 10:42:05 crc kubenswrapper[4926]: I1122 10:42:05.441989 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hd54r\" (UniqueName: \"kubernetes.io/projected/831547cf-3937-4d20-83b8-9570e309f0b3-kube-api-access-hd54r\") pod \"apiserver-7bbb656c7d-7zp2q\" (UID: \"831547cf-3937-4d20-83b8-9570e309f0b3\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-7zp2q" Nov 22 10:42:05 crc kubenswrapper[4926]: I1122 10:42:05.456593 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-c8wkh\" (UniqueName: \"kubernetes.io/projected/f209060d-bbc4-4f07-82e2-1e8b212c1f56-kube-api-access-c8wkh\") pod \"machine-approver-56656f9798-ztdwg\" (UID: \"f209060d-bbc4-4f07-82e2-1e8b212c1f56\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-ztdwg" Nov 22 10:42:05 crc kubenswrapper[4926]: I1122 10:42:05.480310 4926 request.go:700] Waited for 1.900813552s due to client-side throttling, not priority and fairness, request: GET:https://api-int.crc.testing:6443/api/v1/namespaces/openshift-machine-config-operator/secrets?fieldSelector=metadata.name%3Dmachine-config-server-tls&limit=500&resourceVersion=0 Nov 22 10:42:05 crc kubenswrapper[4926]: I1122 10:42:05.481117 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5g4xh\" (UniqueName: \"kubernetes.io/projected/c44b98ae-cd26-48e0-9c72-725fd64d22f9-kube-api-access-5g4xh\") pod \"apiserver-76f77b778f-d4vh5\" (UID: \"c44b98ae-cd26-48e0-9c72-725fd64d22f9\") " pod="openshift-apiserver/apiserver-76f77b778f-d4vh5" Nov 22 10:42:05 crc kubenswrapper[4926]: I1122 10:42:05.482129 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-tls" Nov 22 10:42:05 crc kubenswrapper[4926]: I1122 10:42:05.489310 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-ztdwg" Nov 22 10:42:05 crc kubenswrapper[4926]: I1122 10:42:05.502266 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-dockercfg-qx5rd" Nov 22 10:42:05 crc kubenswrapper[4926]: I1122 10:42:05.503527 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-69f744f599-wvt6w" Nov 22 10:42:05 crc kubenswrapper[4926]: I1122 10:42:05.522298 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"node-bootstrapper-token" Nov 22 10:42:05 crc kubenswrapper[4926]: W1122 10:42:05.527093 4926 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf209060d_bbc4_4f07_82e2_1e8b212c1f56.slice/crio-5ebd25bad188162a24f5031d36ee0b7e13091fb7d8f88e5708981b39de855ed0 WatchSource:0}: Error finding container 5ebd25bad188162a24f5031d36ee0b7e13091fb7d8f88e5708981b39de855ed0: Status 404 returned error can't find the container with id 5ebd25bad188162a24f5031d36ee0b7e13091fb7d8f88e5708981b39de855ed0 Nov 22 10:42:05 crc kubenswrapper[4926]: I1122 10:42:05.542360 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"dns-default" Nov 22 10:42:05 crc kubenswrapper[4926]: I1122 10:42:05.546024 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-944rj" Nov 22 10:42:05 crc kubenswrapper[4926]: I1122 10:42:05.562138 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-7zp2q" Nov 22 10:42:05 crc kubenswrapper[4926]: I1122 10:42:05.564686 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-default-metrics-tls" Nov 22 10:42:05 crc kubenswrapper[4926]: I1122 10:42:05.569866 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-config-operator/openshift-config-operator-7777fb866f-954dx"] Nov 22 10:42:05 crc kubenswrapper[4926]: W1122 10:42:05.579156 4926 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podc14868fd_4ccf_4779_81e8_dc3b30393f1f.slice/crio-e48c805dd06d522424c9a10359ca4283db907f3d7d7066b28ac894f3cd08b760 WatchSource:0}: Error finding container e48c805dd06d522424c9a10359ca4283db907f3d7d7066b28ac894f3cd08b760: Status 404 returned error can't find the container with id e48c805dd06d522424c9a10359ca4283db907f3d7d7066b28ac894f3cd08b760 Nov 22 10:42:05 crc kubenswrapper[4926]: I1122 10:42:05.582310 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-dockercfg-jwfmh" Nov 22 10:42:05 crc kubenswrapper[4926]: I1122 10:42:05.590844 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-g6m6g" Nov 22 10:42:05 crc kubenswrapper[4926]: I1122 10:42:05.598086 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-7954f5f757-8l5sv" Nov 22 10:42:05 crc kubenswrapper[4926]: I1122 10:42:05.612683 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-console-operator/console-operator-58897d9998-zk2x8" Nov 22 10:42:05 crc kubenswrapper[4926]: I1122 10:42:05.620168 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8978x\" (UniqueName: \"kubernetes.io/projected/40a4057a-5a42-463f-aeb2-995754abca81-kube-api-access-8978x\") pod \"multus-admission-controller-857f4d67dd-js7td\" (UID: \"40a4057a-5a42-463f-aeb2-995754abca81\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-js7td" Nov 22 10:42:05 crc kubenswrapper[4926]: I1122 10:42:05.625129 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-f9d7485db-nsj2w"] Nov 22 10:42:05 crc kubenswrapper[4926]: I1122 10:42:05.646241 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6gmbh\" (UniqueName: \"kubernetes.io/projected/80a51b2f-5a67-4cb4-ab41-09516cec7e4d-kube-api-access-6gmbh\") pod \"openshift-apiserver-operator-796bbdcf4f-rndxt\" (UID: \"80a51b2f-5a67-4cb4-ab41-09516cec7e4d\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-rndxt" Nov 22 10:42:05 crc kubenswrapper[4926]: I1122 10:42:05.651088 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-pdkqk"] Nov 22 10:42:05 crc kubenswrapper[4926]: I1122 10:42:05.667564 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-46497\" (UniqueName: \"kubernetes.io/projected/32923a07-7dfd-47a6-9b84-6cf7ebd329fa-kube-api-access-46497\") pod \"service-ca-9c57cc56f-2l9hr\" (UID: \"32923a07-7dfd-47a6-9b84-6cf7ebd329fa\") " pod="openshift-service-ca/service-ca-9c57cc56f-2l9hr" Nov 22 10:42:05 crc kubenswrapper[4926]: I1122 10:42:05.679488 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s76s5\" (UniqueName: \"kubernetes.io/projected/261fec63-db4b-4580-a128-3cc51da2cc93-kube-api-access-s76s5\") pod \"olm-operator-6b444d44fb-brhnh\" (UID: \"261fec63-db4b-4580-a128-3cc51da2cc93\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-brhnh" Nov 22 10:42:05 crc kubenswrapper[4926]: I1122 10:42:05.687077 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-c56h5" Nov 22 10:42:05 crc kubenswrapper[4926]: I1122 10:42:05.700560 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nnxzd\" (UniqueName: \"kubernetes.io/projected/3e81d5b7-f92f-4de5-bb55-512620175698-kube-api-access-nnxzd\") pod \"etcd-operator-b45778765-9ph96\" (UID: \"3e81d5b7-f92f-4de5-bb55-512620175698\") " pod="openshift-etcd-operator/etcd-operator-b45778765-9ph96" Nov 22 10:42:05 crc kubenswrapper[4926]: I1122 10:42:05.719014 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-prkxl\" (UniqueName: \"kubernetes.io/projected/e7bb5a83-ce79-42db-a6d0-996b1405d668-kube-api-access-prkxl\") pod \"openshift-controller-manager-operator-756b6f6bc6-dl4z4\" (UID: \"e7bb5a83-ce79-42db-a6d0-996b1405d668\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-dl4z4" Nov 22 10:42:05 crc kubenswrapper[4926]: I1122 10:42:05.725326 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-apiserver/apiserver-76f77b778f-d4vh5" Nov 22 10:42:05 crc kubenswrapper[4926]: I1122 10:42:05.729386 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-brhnh" Nov 22 10:42:05 crc kubenswrapper[4926]: I1122 10:42:05.737442 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/e0ad330e-054d-4f9f-89e1-2e18fca1e66a-bound-sa-token\") pod \"ingress-operator-5b745b69d9-wlvg2\" (UID: \"e0ad330e-054d-4f9f-89e1-2e18fca1e66a\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-wlvg2" Nov 22 10:42:05 crc kubenswrapper[4926]: I1122 10:42:05.749061 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication-operator/authentication-operator-69f744f599-wvt6w"] Nov 22 10:42:05 crc kubenswrapper[4926]: I1122 10:42:05.762514 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-857f4d67dd-js7td" Nov 22 10:42:05 crc kubenswrapper[4926]: I1122 10:42:05.763965 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bw2sr\" (UniqueName: \"kubernetes.io/projected/731bcd4c-12b4-408c-a30b-dd7ccd6a0712-kube-api-access-bw2sr\") pod \"dns-operator-744455d44c-jb7x7\" (UID: \"731bcd4c-12b4-408c-a30b-dd7ccd6a0712\") " pod="openshift-dns-operator/dns-operator-744455d44c-jb7x7" Nov 22 10:42:05 crc kubenswrapper[4926]: W1122 10:42:05.772713 4926 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod0dc0778b_7907_42c7_a656_22c517a50c3a.slice/crio-ec8a34be789389a9e829fd783fba32089896ffed8c7bda6b7f4e3d9657b202ad WatchSource:0}: Error finding container ec8a34be789389a9e829fd783fba32089896ffed8c7bda6b7f4e3d9657b202ad: Status 404 returned error can't find the container with id ec8a34be789389a9e829fd783fba32089896ffed8c7bda6b7f4e3d9657b202ad Nov 22 10:42:05 crc kubenswrapper[4926]: I1122 10:42:05.778804 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-rndxt" Nov 22 10:42:05 crc kubenswrapper[4926]: I1122 10:42:05.781706 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pp7tn\" (UniqueName: \"kubernetes.io/projected/e0ad330e-054d-4f9f-89e1-2e18fca1e66a-kube-api-access-pp7tn\") pod \"ingress-operator-5b745b69d9-wlvg2\" (UID: \"e0ad330e-054d-4f9f-89e1-2e18fca1e66a\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-wlvg2" Nov 22 10:42:05 crc kubenswrapper[4926]: I1122 10:42:05.798635 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-etcd-operator/etcd-operator-b45778765-9ph96" Nov 22 10:42:05 crc kubenswrapper[4926]: I1122 10:42:05.801124 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-thzbk\" (UniqueName: \"kubernetes.io/projected/e0882887-a6d9-4aac-a7d7-c14b934298e2-kube-api-access-thzbk\") pod \"machine-api-operator-5694c8668f-jkqsx\" (UID: \"e0882887-a6d9-4aac-a7d7-c14b934298e2\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-jkqsx" Nov 22 10:42:05 crc kubenswrapper[4926]: I1122 10:42:05.816980 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/9464da92-3307-4d40-8643-133ecf84d523-kube-api-access\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-5mlfw\" (UID: \"9464da92-3307-4d40-8643-133ecf84d523\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-5mlfw" Nov 22 10:42:05 crc kubenswrapper[4926]: I1122 10:42:05.822197 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-oauth-apiserver/apiserver-7bbb656c7d-7zp2q"] Nov 22 10:42:05 crc kubenswrapper[4926]: I1122 10:42:05.830502 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-944rj"] Nov 22 10:42:05 crc kubenswrapper[4926]: I1122 10:42:05.836194 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gmr89\" (UniqueName: \"kubernetes.io/projected/f9451a32-8743-493b-87a2-e7473354f0a4-kube-api-access-gmr89\") pod \"service-ca-operator-777779d784-dcbsr\" (UID: \"f9451a32-8743-493b-87a2-e7473354f0a4\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-dcbsr" Nov 22 10:42:05 crc kubenswrapper[4926]: W1122 10:42:05.855852 4926 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod831547cf_3937_4d20_83b8_9570e309f0b3.slice/crio-0a54ef2695f173644f09f0124c7a91ee96369c12f066784ece97f1d4f17ccaa0 WatchSource:0}: Error finding container 0a54ef2695f173644f09f0124c7a91ee96369c12f066784ece97f1d4f17ccaa0: Status 404 returned error can't find the container with id 0a54ef2695f173644f09f0124c7a91ee96369c12f066784ece97f1d4f17ccaa0 Nov 22 10:42:05 crc kubenswrapper[4926]: I1122 10:42:05.862794 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-5mlfw" Nov 22 10:42:05 crc kubenswrapper[4926]: I1122 10:42:05.879018 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-p9wb7\" (UniqueName: \"kubernetes.io/projected/05be3235-93e8-4d5c-8f34-9c47f694bb1b-kube-api-access-p9wb7\") pod \"machine-config-operator-74547568cd-pzqp7\" (UID: \"05be3235-93e8-4d5c-8f34-9c47f694bb1b\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-pzqp7" Nov 22 10:42:05 crc kubenswrapper[4926]: I1122 10:42:05.879615 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7w54x\" (UniqueName: \"kubernetes.io/projected/3711dcd6-7ec3-4610-857d-e24f38c6e986-kube-api-access-7w54x\") pod \"packageserver-d55dfcdfc-rwrgv\" (UID: \"3711dcd6-7ec3-4610-857d-e24f38c6e986\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-rwrgv" Nov 22 10:42:05 crc kubenswrapper[4926]: I1122 10:42:05.889821 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-service-ca/service-ca-9c57cc56f-2l9hr" Nov 22 10:42:05 crc kubenswrapper[4926]: I1122 10:42:05.981688 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/171f6f42-9983-401e-9aa1-1f1a7dfd412c-config\") pod \"kube-storage-version-migrator-operator-b67b599dd-pcmsx\" (UID: \"171f6f42-9983-401e-9aa1-1f1a7dfd412c\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-pcmsx" Nov 22 10:42:05 crc kubenswrapper[4926]: I1122 10:42:05.982079 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/d7487242-27ca-4f15-8d7f-6a7cf67e8992-registry-certificates\") pod \"image-registry-697d97f7c8-wlnj9\" (UID: \"d7487242-27ca-4f15-8d7f-6a7cf67e8992\") " pod="openshift-image-registry/image-registry-697d97f7c8-wlnj9" Nov 22 10:42:05 crc kubenswrapper[4926]: I1122 10:42:05.982175 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/12a1d8f7-8b05-42ae-bba2-13bb443eae1e-stats-auth\") pod \"router-default-5444994796-8k4sd\" (UID: \"12a1d8f7-8b05-42ae-bba2-13bb443eae1e\") " pod="openshift-ingress/router-default-5444994796-8k4sd" Nov 22 10:42:05 crc kubenswrapper[4926]: I1122 10:42:05.982244 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nm522\" (UniqueName: \"kubernetes.io/projected/d7487242-27ca-4f15-8d7f-6a7cf67e8992-kube-api-access-nm522\") pod \"image-registry-697d97f7c8-wlnj9\" (UID: \"d7487242-27ca-4f15-8d7f-6a7cf67e8992\") " pod="openshift-image-registry/image-registry-697d97f7c8-wlnj9" Nov 22 10:42:05 crc kubenswrapper[4926]: I1122 10:42:05.982319 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/d7487242-27ca-4f15-8d7f-6a7cf67e8992-ca-trust-extracted\") pod \"image-registry-697d97f7c8-wlnj9\" (UID: \"d7487242-27ca-4f15-8d7f-6a7cf67e8992\") " pod="openshift-image-registry/image-registry-697d97f7c8-wlnj9" Nov 22 10:42:05 crc kubenswrapper[4926]: I1122 10:42:05.982415 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/cb40abc0-74ba-42e9-bc0d-a4cdfc998421-proxy-tls\") pod \"machine-config-controller-84d6567774-t4dgw\" (UID: \"cb40abc0-74ba-42e9-bc0d-a4cdfc998421\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-t4dgw" Nov 22 10:42:05 crc kubenswrapper[4926]: I1122 10:42:05.982500 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/63ad9007-0aba-4183-95d0-d97f7034841d-kube-api-access\") pod \"kube-apiserver-operator-766d6c64bb-27btn\" (UID: \"63ad9007-0aba-4183-95d0-d97f7034841d\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-27btn" Nov 22 10:42:05 crc kubenswrapper[4926]: I1122 10:42:05.982588 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/d7487242-27ca-4f15-8d7f-6a7cf67e8992-trusted-ca\") pod \"image-registry-697d97f7c8-wlnj9\" (UID: 
\"d7487242-27ca-4f15-8d7f-6a7cf67e8992\") " pod="openshift-image-registry/image-registry-697d97f7c8-wlnj9" Nov 22 10:42:05 crc kubenswrapper[4926]: I1122 10:42:05.982638 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/63ad9007-0aba-4183-95d0-d97f7034841d-serving-cert\") pod \"kube-apiserver-operator-766d6c64bb-27btn\" (UID: \"63ad9007-0aba-4183-95d0-d97f7034841d\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-27btn" Nov 22 10:42:05 crc kubenswrapper[4926]: I1122 10:42:05.982721 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/63ad9007-0aba-4183-95d0-d97f7034841d-config\") pod \"kube-apiserver-operator-766d6c64bb-27btn\" (UID: \"63ad9007-0aba-4183-95d0-d97f7034841d\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-27btn" Nov 22 10:42:05 crc kubenswrapper[4926]: I1122 10:42:05.982737 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/55c9f174-5a49-4d37-9ce6-8640b252c9c2-srv-cert\") pod \"catalog-operator-68c6474976-zmfpm\" (UID: \"55c9f174-5a49-4d37-9ce6-8640b252c9c2\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-zmfpm" Nov 22 10:42:05 crc kubenswrapper[4926]: I1122 10:42:05.982771 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zf2dd\" (UniqueName: \"kubernetes.io/projected/171f6f42-9983-401e-9aa1-1f1a7dfd412c-kube-api-access-zf2dd\") pod \"kube-storage-version-migrator-operator-b67b599dd-pcmsx\" (UID: \"171f6f42-9983-401e-9aa1-1f1a7dfd412c\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-pcmsx" Nov 22 10:42:05 crc kubenswrapper[4926]: I1122 10:42:05.982820 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-785p5\" (UniqueName: \"kubernetes.io/projected/cb40abc0-74ba-42e9-bc0d-a4cdfc998421-kube-api-access-785p5\") pod \"machine-config-controller-84d6567774-t4dgw\" (UID: \"cb40abc0-74ba-42e9-bc0d-a4cdfc998421\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-t4dgw" Nov 22 10:42:05 crc kubenswrapper[4926]: I1122 10:42:05.982868 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/46a7e15b-1a37-42ad-8e16-50681c5fccfc-kube-api-access\") pod \"kube-controller-manager-operator-78b949d7b-kkqbv\" (UID: \"46a7e15b-1a37-42ad-8e16-50681c5fccfc\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-kkqbv" Nov 22 10:42:05 crc kubenswrapper[4926]: I1122 10:42:05.982931 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/12a1d8f7-8b05-42ae-bba2-13bb443eae1e-metrics-certs\") pod \"router-default-5444994796-8k4sd\" (UID: \"12a1d8f7-8b05-42ae-bba2-13bb443eae1e\") " pod="openshift-ingress/router-default-5444994796-8k4sd" Nov 22 10:42:05 crc kubenswrapper[4926]: I1122 10:42:05.982948 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: 
\"kubernetes.io/configmap/67768e9f-2f78-4189-ace5-1bedba6669a7-trusted-ca\") pod \"cluster-image-registry-operator-dc59b4c8b-j5lpl\" (UID: \"67768e9f-2f78-4189-ace5-1bedba6669a7\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-j5lpl" Nov 22 10:42:05 crc kubenswrapper[4926]: I1122 10:42:05.983161 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/b95520b5-bae8-4409-9ae4-ad3763092f1c-package-server-manager-serving-cert\") pod \"package-server-manager-789f6589d5-h55df\" (UID: \"b95520b5-bae8-4409-9ae4-ad3763092f1c\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-h55df" Nov 22 10:42:05 crc kubenswrapper[4926]: I1122 10:42:05.983178 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bgk58\" (UniqueName: \"kubernetes.io/projected/67768e9f-2f78-4189-ace5-1bedba6669a7-kube-api-access-bgk58\") pod \"cluster-image-registry-operator-dc59b4c8b-j5lpl\" (UID: \"67768e9f-2f78-4189-ace5-1bedba6669a7\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-j5lpl" Nov 22 10:42:05 crc kubenswrapper[4926]: I1122 10:42:05.983204 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4h9lr\" (UniqueName: \"kubernetes.io/projected/12a1d8f7-8b05-42ae-bba2-13bb443eae1e-kube-api-access-4h9lr\") pod \"router-default-5444994796-8k4sd\" (UID: \"12a1d8f7-8b05-42ae-bba2-13bb443eae1e\") " pod="openshift-ingress/router-default-5444994796-8k4sd" Nov 22 10:42:05 crc kubenswrapper[4926]: I1122 10:42:05.983219 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/171f6f42-9983-401e-9aa1-1f1a7dfd412c-serving-cert\") pod \"kube-storage-version-migrator-operator-b67b599dd-pcmsx\" (UID: \"171f6f42-9983-401e-9aa1-1f1a7dfd412c\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-pcmsx" Nov 22 10:42:05 crc kubenswrapper[4926]: I1122 10:42:05.983234 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vsgpz\" (UniqueName: \"kubernetes.io/projected/b95520b5-bae8-4409-9ae4-ad3763092f1c-kube-api-access-vsgpz\") pod \"package-server-manager-789f6589d5-h55df\" (UID: \"b95520b5-bae8-4409-9ae4-ad3763092f1c\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-h55df" Nov 22 10:42:05 crc kubenswrapper[4926]: I1122 10:42:05.983251 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/0fd98b4c-0217-4784-8bbd-b0ec0680a611-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-78cbb6b69f-rkbd7\" (UID: \"0fd98b4c-0217-4784-8bbd-b0ec0680a611\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-rkbd7" Nov 22 10:42:05 crc kubenswrapper[4926]: I1122 10:42:05.984366 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/d7487242-27ca-4f15-8d7f-6a7cf67e8992-registry-tls\") pod \"image-registry-697d97f7c8-wlnj9\" (UID: \"d7487242-27ca-4f15-8d7f-6a7cf67e8992\") " 
pod="openshift-image-registry/image-registry-697d97f7c8-wlnj9" Nov 22 10:42:05 crc kubenswrapper[4926]: I1122 10:42:05.984391 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/d7487242-27ca-4f15-8d7f-6a7cf67e8992-bound-sa-token\") pod \"image-registry-697d97f7c8-wlnj9\" (UID: \"d7487242-27ca-4f15-8d7f-6a7cf67e8992\") " pod="openshift-image-registry/image-registry-697d97f7c8-wlnj9" Nov 22 10:42:05 crc kubenswrapper[4926]: I1122 10:42:05.984416 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/4943246c-40df-4927-8380-b7d2804a17f7-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-cm8rb\" (UID: \"4943246c-40df-4927-8380-b7d2804a17f7\") " pod="openshift-marketplace/marketplace-operator-79b997595-cm8rb" Nov 22 10:42:05 crc kubenswrapper[4926]: I1122 10:42:05.985433 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/cb40abc0-74ba-42e9-bc0d-a4cdfc998421-mcc-auth-proxy-config\") pod \"machine-config-controller-84d6567774-t4dgw\" (UID: \"cb40abc0-74ba-42e9-bc0d-a4cdfc998421\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-t4dgw" Nov 22 10:42:05 crc kubenswrapper[4926]: I1122 10:42:05.985520 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/55c9f174-5a49-4d37-9ce6-8640b252c9c2-profile-collector-cert\") pod \"catalog-operator-68c6474976-zmfpm\" (UID: \"55c9f174-5a49-4d37-9ce6-8640b252c9c2\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-zmfpm" Nov 22 10:42:05 crc kubenswrapper[4926]: I1122 10:42:05.985605 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hz2c5\" (UniqueName: \"kubernetes.io/projected/0fd98b4c-0217-4784-8bbd-b0ec0680a611-kube-api-access-hz2c5\") pod \"control-plane-machine-set-operator-78cbb6b69f-rkbd7\" (UID: \"0fd98b4c-0217-4784-8bbd-b0ec0680a611\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-rkbd7" Nov 22 10:42:05 crc kubenswrapper[4926]: I1122 10:42:05.985681 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-st54q\" (UniqueName: \"kubernetes.io/projected/4943246c-40df-4927-8380-b7d2804a17f7-kube-api-access-st54q\") pod \"marketplace-operator-79b997595-cm8rb\" (UID: \"4943246c-40df-4927-8380-b7d2804a17f7\") " pod="openshift-marketplace/marketplace-operator-79b997595-cm8rb" Nov 22 10:42:05 crc kubenswrapper[4926]: I1122 10:42:05.985750 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/67768e9f-2f78-4189-ace5-1bedba6669a7-bound-sa-token\") pod \"cluster-image-registry-operator-dc59b4c8b-j5lpl\" (UID: \"67768e9f-2f78-4189-ace5-1bedba6669a7\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-j5lpl" Nov 22 10:42:05 crc kubenswrapper[4926]: I1122 10:42:05.985817 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca-bundle\" (UniqueName: 
\"kubernetes.io/configmap/12a1d8f7-8b05-42ae-bba2-13bb443eae1e-service-ca-bundle\") pod \"router-default-5444994796-8k4sd\" (UID: \"12a1d8f7-8b05-42ae-bba2-13bb443eae1e\") " pod="openshift-ingress/router-default-5444994796-8k4sd" Nov 22 10:42:05 crc kubenswrapper[4926]: I1122 10:42:05.985941 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/46a7e15b-1a37-42ad-8e16-50681c5fccfc-serving-cert\") pod \"kube-controller-manager-operator-78b949d7b-kkqbv\" (UID: \"46a7e15b-1a37-42ad-8e16-50681c5fccfc\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-kkqbv" Nov 22 10:42:05 crc kubenswrapper[4926]: I1122 10:42:05.986125 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/d7487242-27ca-4f15-8d7f-6a7cf67e8992-installation-pull-secrets\") pod \"image-registry-697d97f7c8-wlnj9\" (UID: \"d7487242-27ca-4f15-8d7f-6a7cf67e8992\") " pod="openshift-image-registry/image-registry-697d97f7c8-wlnj9" Nov 22 10:42:05 crc kubenswrapper[4926]: I1122 10:42:05.986198 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fs727\" (UniqueName: \"kubernetes.io/projected/1200b370-1ba8-4366-bb7f-bbf8e0c7ce1b-kube-api-access-fs727\") pod \"migrator-59844c95c7-qkq57\" (UID: \"1200b370-1ba8-4366-bb7f-bbf8e0c7ce1b\") " pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-qkq57" Nov 22 10:42:05 crc kubenswrapper[4926]: I1122 10:42:05.986271 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-wlnj9\" (UID: \"d7487242-27ca-4f15-8d7f-6a7cf67e8992\") " pod="openshift-image-registry/image-registry-697d97f7c8-wlnj9" Nov 22 10:42:05 crc kubenswrapper[4926]: I1122 10:42:05.986357 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/67768e9f-2f78-4189-ace5-1bedba6669a7-image-registry-operator-tls\") pod \"cluster-image-registry-operator-dc59b4c8b-j5lpl\" (UID: \"67768e9f-2f78-4189-ace5-1bedba6669a7\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-j5lpl" Nov 22 10:42:05 crc kubenswrapper[4926]: I1122 10:42:05.986436 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/4943246c-40df-4927-8380-b7d2804a17f7-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-cm8rb\" (UID: \"4943246c-40df-4927-8380-b7d2804a17f7\") " pod="openshift-marketplace/marketplace-operator-79b997595-cm8rb" Nov 22 10:42:05 crc kubenswrapper[4926]: I1122 10:42:05.986531 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cnjv2\" (UniqueName: \"kubernetes.io/projected/55c9f174-5a49-4d37-9ce6-8640b252c9c2-kube-api-access-cnjv2\") pod \"catalog-operator-68c6474976-zmfpm\" (UID: \"55c9f174-5a49-4d37-9ce6-8640b252c9c2\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-zmfpm" Nov 22 10:42:05 crc kubenswrapper[4926]: I1122 10:42:05.986600 4926 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/12a1d8f7-8b05-42ae-bba2-13bb443eae1e-default-certificate\") pod \"router-default-5444994796-8k4sd\" (UID: \"12a1d8f7-8b05-42ae-bba2-13bb443eae1e\") " pod="openshift-ingress/router-default-5444994796-8k4sd" Nov 22 10:42:05 crc kubenswrapper[4926]: I1122 10:42:05.986665 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/46a7e15b-1a37-42ad-8e16-50681c5fccfc-config\") pod \"kube-controller-manager-operator-78b949d7b-kkqbv\" (UID: \"46a7e15b-1a37-42ad-8e16-50681c5fccfc\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-kkqbv" Nov 22 10:42:05 crc kubenswrapper[4926]: E1122 10:42:05.988134 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-22 10:42:06.488116508 +0000 UTC m=+146.789721875 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-wlnj9" (UID: "d7487242-27ca-4f15-8d7f-6a7cf67e8992") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 10:42:05 crc kubenswrapper[4926]: I1122 10:42:05.993381 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-5694c8668f-jkqsx" Nov 22 10:42:06 crc kubenswrapper[4926]: I1122 10:42:05.999969 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-744455d44c-jb7x7" Nov 22 10:42:06 crc kubenswrapper[4926]: I1122 10:42:06.012480 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-dl4z4" Nov 22 10:42:06 crc kubenswrapper[4926]: I1122 10:42:06.071156 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-wlvg2" Nov 22 10:42:06 crc kubenswrapper[4926]: I1122 10:42:06.089071 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 22 10:42:06 crc kubenswrapper[4926]: I1122 10:42:06.089850 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/d7487242-27ca-4f15-8d7f-6a7cf67e8992-registry-tls\") pod \"image-registry-697d97f7c8-wlnj9\" (UID: \"d7487242-27ca-4f15-8d7f-6a7cf67e8992\") " pod="openshift-image-registry/image-registry-697d97f7c8-wlnj9" Nov 22 10:42:06 crc kubenswrapper[4926]: I1122 10:42:06.089916 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/d7487242-27ca-4f15-8d7f-6a7cf67e8992-bound-sa-token\") pod \"image-registry-697d97f7c8-wlnj9\" (UID: \"d7487242-27ca-4f15-8d7f-6a7cf67e8992\") " pod="openshift-image-registry/image-registry-697d97f7c8-wlnj9" Nov 22 10:42:06 crc kubenswrapper[4926]: I1122 10:42:06.089963 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/4943246c-40df-4927-8380-b7d2804a17f7-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-cm8rb\" (UID: \"4943246c-40df-4927-8380-b7d2804a17f7\") " pod="openshift-marketplace/marketplace-operator-79b997595-cm8rb" Nov 22 10:42:06 crc kubenswrapper[4926]: I1122 10:42:06.089988 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/cb40abc0-74ba-42e9-bc0d-a4cdfc998421-mcc-auth-proxy-config\") pod \"machine-config-controller-84d6567774-t4dgw\" (UID: \"cb40abc0-74ba-42e9-bc0d-a4cdfc998421\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-t4dgw" Nov 22 10:42:06 crc kubenswrapper[4926]: I1122 10:42:06.090041 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registration-dir\" (UniqueName: \"kubernetes.io/host-path/5a5a2c07-33d5-4376-bee5-375341146a78-registration-dir\") pod \"csi-hostpathplugin-p5j7r\" (UID: \"5a5a2c07-33d5-4376-bee5-375341146a78\") " pod="hostpath-provisioner/csi-hostpathplugin-p5j7r" Nov 22 10:42:06 crc kubenswrapper[4926]: I1122 10:42:06.090060 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rrctf\" (UniqueName: \"kubernetes.io/projected/5a5a2c07-33d5-4376-bee5-375341146a78-kube-api-access-rrctf\") pod \"csi-hostpathplugin-p5j7r\" (UID: \"5a5a2c07-33d5-4376-bee5-375341146a78\") " pod="hostpath-provisioner/csi-hostpathplugin-p5j7r" Nov 22 10:42:06 crc kubenswrapper[4926]: I1122 10:42:06.090105 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/286ef5c2-8cda-4b7e-868d-6fd126ef4845-certs\") pod \"machine-config-server-qhj5l\" (UID: \"286ef5c2-8cda-4b7e-868d-6fd126ef4845\") " pod="openshift-machine-config-operator/machine-config-server-qhj5l" Nov 22 10:42:06 crc kubenswrapper[4926]: I1122 10:42:06.090129 4926 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/873a85f5-ab2c-4594-8968-5104458c4233-metrics-tls\") pod \"dns-default-mwqpv\" (UID: \"873a85f5-ab2c-4594-8968-5104458c4233\") " pod="openshift-dns/dns-default-mwqpv" Nov 22 10:42:06 crc kubenswrapper[4926]: I1122 10:42:06.090151 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/55c9f174-5a49-4d37-9ce6-8640b252c9c2-profile-collector-cert\") pod \"catalog-operator-68c6474976-zmfpm\" (UID: \"55c9f174-5a49-4d37-9ce6-8640b252c9c2\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-zmfpm" Nov 22 10:42:06 crc kubenswrapper[4926]: I1122 10:42:06.090192 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hz2c5\" (UniqueName: \"kubernetes.io/projected/0fd98b4c-0217-4784-8bbd-b0ec0680a611-kube-api-access-hz2c5\") pod \"control-plane-machine-set-operator-78cbb6b69f-rkbd7\" (UID: \"0fd98b4c-0217-4784-8bbd-b0ec0680a611\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-rkbd7" Nov 22 10:42:06 crc kubenswrapper[4926]: I1122 10:42:06.090248 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-st54q\" (UniqueName: \"kubernetes.io/projected/4943246c-40df-4927-8380-b7d2804a17f7-kube-api-access-st54q\") pod \"marketplace-operator-79b997595-cm8rb\" (UID: \"4943246c-40df-4927-8380-b7d2804a17f7\") " pod="openshift-marketplace/marketplace-operator-79b997595-cm8rb" Nov 22 10:42:06 crc kubenswrapper[4926]: I1122 10:42:06.090270 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mountpoint-dir\" (UniqueName: \"kubernetes.io/host-path/5a5a2c07-33d5-4376-bee5-375341146a78-mountpoint-dir\") pod \"csi-hostpathplugin-p5j7r\" (UID: \"5a5a2c07-33d5-4376-bee5-375341146a78\") " pod="hostpath-provisioner/csi-hostpathplugin-p5j7r" Nov 22 10:42:06 crc kubenswrapper[4926]: I1122 10:42:06.090290 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/67768e9f-2f78-4189-ace5-1bedba6669a7-bound-sa-token\") pod \"cluster-image-registry-operator-dc59b4c8b-j5lpl\" (UID: \"67768e9f-2f78-4189-ace5-1bedba6669a7\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-j5lpl" Nov 22 10:42:06 crc kubenswrapper[4926]: I1122 10:42:06.090310 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/f6332434-11ab-46ab-8379-c056d2c292b5-config-volume\") pod \"collect-profiles-29396790-vw7kn\" (UID: \"f6332434-11ab-46ab-8379-c056d2c292b5\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29396790-vw7kn" Nov 22 10:42:06 crc kubenswrapper[4926]: I1122 10:42:06.090357 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/12a1d8f7-8b05-42ae-bba2-13bb443eae1e-service-ca-bundle\") pod \"router-default-5444994796-8k4sd\" (UID: \"12a1d8f7-8b05-42ae-bba2-13bb443eae1e\") " pod="openshift-ingress/router-default-5444994796-8k4sd" Nov 22 10:42:06 crc kubenswrapper[4926]: I1122 10:42:06.090383 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: 
\"kubernetes.io/secret/46a7e15b-1a37-42ad-8e16-50681c5fccfc-serving-cert\") pod \"kube-controller-manager-operator-78b949d7b-kkqbv\" (UID: \"46a7e15b-1a37-42ad-8e16-50681c5fccfc\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-kkqbv" Nov 22 10:42:06 crc kubenswrapper[4926]: I1122 10:42:06.090407 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/d7487242-27ca-4f15-8d7f-6a7cf67e8992-installation-pull-secrets\") pod \"image-registry-697d97f7c8-wlnj9\" (UID: \"d7487242-27ca-4f15-8d7f-6a7cf67e8992\") " pod="openshift-image-registry/image-registry-697d97f7c8-wlnj9" Nov 22 10:42:06 crc kubenswrapper[4926]: I1122 10:42:06.090431 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g6glh\" (UniqueName: \"kubernetes.io/projected/18cc7636-3262-4ea9-b0bc-23d29f6d2d2f-kube-api-access-g6glh\") pod \"ingress-canary-k24qp\" (UID: \"18cc7636-3262-4ea9-b0bc-23d29f6d2d2f\") " pod="openshift-ingress-canary/ingress-canary-k24qp" Nov 22 10:42:06 crc kubenswrapper[4926]: I1122 10:42:06.090468 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fs727\" (UniqueName: \"kubernetes.io/projected/1200b370-1ba8-4366-bb7f-bbf8e0c7ce1b-kube-api-access-fs727\") pod \"migrator-59844c95c7-qkq57\" (UID: \"1200b370-1ba8-4366-bb7f-bbf8e0c7ce1b\") " pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-qkq57" Nov 22 10:42:06 crc kubenswrapper[4926]: I1122 10:42:06.090577 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/67768e9f-2f78-4189-ace5-1bedba6669a7-image-registry-operator-tls\") pod \"cluster-image-registry-operator-dc59b4c8b-j5lpl\" (UID: \"67768e9f-2f78-4189-ace5-1bedba6669a7\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-j5lpl" Nov 22 10:42:06 crc kubenswrapper[4926]: I1122 10:42:06.090632 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"socket-dir\" (UniqueName: \"kubernetes.io/host-path/5a5a2c07-33d5-4376-bee5-375341146a78-socket-dir\") pod \"csi-hostpathplugin-p5j7r\" (UID: \"5a5a2c07-33d5-4376-bee5-375341146a78\") " pod="hostpath-provisioner/csi-hostpathplugin-p5j7r" Nov 22 10:42:06 crc kubenswrapper[4926]: I1122 10:42:06.090654 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/4943246c-40df-4927-8380-b7d2804a17f7-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-cm8rb\" (UID: \"4943246c-40df-4927-8380-b7d2804a17f7\") " pod="openshift-marketplace/marketplace-operator-79b997595-cm8rb" Nov 22 10:42:06 crc kubenswrapper[4926]: I1122 10:42:06.090704 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hmtzx\" (UniqueName: \"kubernetes.io/projected/286ef5c2-8cda-4b7e-868d-6fd126ef4845-kube-api-access-hmtzx\") pod \"machine-config-server-qhj5l\" (UID: \"286ef5c2-8cda-4b7e-868d-6fd126ef4845\") " pod="openshift-machine-config-operator/machine-config-server-qhj5l" Nov 22 10:42:06 crc kubenswrapper[4926]: I1122 10:42:06.090727 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cnjv2\" (UniqueName: 
\"kubernetes.io/projected/55c9f174-5a49-4d37-9ce6-8640b252c9c2-kube-api-access-cnjv2\") pod \"catalog-operator-68c6474976-zmfpm\" (UID: \"55c9f174-5a49-4d37-9ce6-8640b252c9c2\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-zmfpm" Nov 22 10:42:06 crc kubenswrapper[4926]: I1122 10:42:06.090770 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/12a1d8f7-8b05-42ae-bba2-13bb443eae1e-default-certificate\") pod \"router-default-5444994796-8k4sd\" (UID: \"12a1d8f7-8b05-42ae-bba2-13bb443eae1e\") " pod="openshift-ingress/router-default-5444994796-8k4sd" Nov 22 10:42:06 crc kubenswrapper[4926]: I1122 10:42:06.090790 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/46a7e15b-1a37-42ad-8e16-50681c5fccfc-config\") pod \"kube-controller-manager-operator-78b949d7b-kkqbv\" (UID: \"46a7e15b-1a37-42ad-8e16-50681c5fccfc\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-kkqbv" Nov 22 10:42:06 crc kubenswrapper[4926]: I1122 10:42:06.091029 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/171f6f42-9983-401e-9aa1-1f1a7dfd412c-config\") pod \"kube-storage-version-migrator-operator-b67b599dd-pcmsx\" (UID: \"171f6f42-9983-401e-9aa1-1f1a7dfd412c\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-pcmsx" Nov 22 10:42:06 crc kubenswrapper[4926]: I1122 10:42:06.091124 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/d7487242-27ca-4f15-8d7f-6a7cf67e8992-registry-certificates\") pod \"image-registry-697d97f7c8-wlnj9\" (UID: \"d7487242-27ca-4f15-8d7f-6a7cf67e8992\") " pod="openshift-image-registry/image-registry-697d97f7c8-wlnj9" Nov 22 10:42:06 crc kubenswrapper[4926]: I1122 10:42:06.091294 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/12a1d8f7-8b05-42ae-bba2-13bb443eae1e-stats-auth\") pod \"router-default-5444994796-8k4sd\" (UID: \"12a1d8f7-8b05-42ae-bba2-13bb443eae1e\") " pod="openshift-ingress/router-default-5444994796-8k4sd" Nov 22 10:42:06 crc kubenswrapper[4926]: E1122 10:42:06.091687 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 10:42:06.591658833 +0000 UTC m=+146.893264120 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 22 10:42:06 crc kubenswrapper[4926]: I1122 10:42:06.094768 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"csi-data-dir\" (UniqueName: \"kubernetes.io/host-path/5a5a2c07-33d5-4376-bee5-375341146a78-csi-data-dir\") pod \"csi-hostpathplugin-p5j7r\" (UID: \"5a5a2c07-33d5-4376-bee5-375341146a78\") " pod="hostpath-provisioner/csi-hostpathplugin-p5j7r"
Nov 22 10:42:06 crc kubenswrapper[4926]: I1122 10:42:06.094818 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/286ef5c2-8cda-4b7e-868d-6fd126ef4845-node-bootstrap-token\") pod \"machine-config-server-qhj5l\" (UID: \"286ef5c2-8cda-4b7e-868d-6fd126ef4845\") " pod="openshift-machine-config-operator/machine-config-server-qhj5l"
Nov 22 10:42:06 crc kubenswrapper[4926]: I1122 10:42:06.094844 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nm522\" (UniqueName: \"kubernetes.io/projected/d7487242-27ca-4f15-8d7f-6a7cf67e8992-kube-api-access-nm522\") pod \"image-registry-697d97f7c8-wlnj9\" (UID: \"d7487242-27ca-4f15-8d7f-6a7cf67e8992\") " pod="openshift-image-registry/image-registry-697d97f7c8-wlnj9"
Nov 22 10:42:06 crc kubenswrapper[4926]: I1122 10:42:06.094876 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/d7487242-27ca-4f15-8d7f-6a7cf67e8992-ca-trust-extracted\") pod \"image-registry-697d97f7c8-wlnj9\" (UID: \"d7487242-27ca-4f15-8d7f-6a7cf67e8992\") " pod="openshift-image-registry/image-registry-697d97f7c8-wlnj9"
Nov 22 10:42:06 crc kubenswrapper[4926]: I1122 10:42:06.094927 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/18cc7636-3262-4ea9-b0bc-23d29f6d2d2f-cert\") pod \"ingress-canary-k24qp\" (UID: \"18cc7636-3262-4ea9-b0bc-23d29f6d2d2f\") " pod="openshift-ingress-canary/ingress-canary-k24qp"
Nov 22 10:42:06 crc kubenswrapper[4926]: I1122 10:42:06.094988 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/873a85f5-ab2c-4594-8968-5104458c4233-config-volume\") pod \"dns-default-mwqpv\" (UID: \"873a85f5-ab2c-4594-8968-5104458c4233\") " pod="openshift-dns/dns-default-mwqpv"
Nov 22 10:42:06 crc kubenswrapper[4926]: I1122 10:42:06.095029 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/cb40abc0-74ba-42e9-bc0d-a4cdfc998421-proxy-tls\") pod \"machine-config-controller-84d6567774-t4dgw\" (UID: \"cb40abc0-74ba-42e9-bc0d-a4cdfc998421\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-t4dgw"
Nov 22 10:42:06 crc kubenswrapper[4926]: I1122 10:42:06.095054 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/63ad9007-0aba-4183-95d0-d97f7034841d-kube-api-access\") pod \"kube-apiserver-operator-766d6c64bb-27btn\" (UID: \"63ad9007-0aba-4183-95d0-d97f7034841d\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-27btn"
Nov 22 10:42:06 crc kubenswrapper[4926]: I1122 10:42:06.095091 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-d8hwz\" (UniqueName: \"kubernetes.io/projected/f6332434-11ab-46ab-8379-c056d2c292b5-kube-api-access-d8hwz\") pod \"collect-profiles-29396790-vw7kn\" (UID: \"f6332434-11ab-46ab-8379-c056d2c292b5\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29396790-vw7kn"
Nov 22 10:42:06 crc kubenswrapper[4926]: I1122 10:42:06.092452 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/cb40abc0-74ba-42e9-bc0d-a4cdfc998421-mcc-auth-proxy-config\") pod \"machine-config-controller-84d6567774-t4dgw\" (UID: \"cb40abc0-74ba-42e9-bc0d-a4cdfc998421\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-t4dgw"
Nov 22 10:42:06 crc kubenswrapper[4926]: I1122 10:42:06.093418 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-777779d784-dcbsr"
Nov 22 10:42:06 crc kubenswrapper[4926]: I1122 10:42:06.096377 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/d7487242-27ca-4f15-8d7f-6a7cf67e8992-ca-trust-extracted\") pod \"image-registry-697d97f7c8-wlnj9\" (UID: \"d7487242-27ca-4f15-8d7f-6a7cf67e8992\") " pod="openshift-image-registry/image-registry-697d97f7c8-wlnj9"
Nov 22 10:42:06 crc kubenswrapper[4926]: I1122 10:42:06.101932 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/12a1d8f7-8b05-42ae-bba2-13bb443eae1e-service-ca-bundle\") pod \"router-default-5444994796-8k4sd\" (UID: \"12a1d8f7-8b05-42ae-bba2-13bb443eae1e\") " pod="openshift-ingress/router-default-5444994796-8k4sd"
Nov 22 10:42:06 crc kubenswrapper[4926]: I1122 10:42:06.107290 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/d7487242-27ca-4f15-8d7f-6a7cf67e8992-trusted-ca\") pod \"image-registry-697d97f7c8-wlnj9\" (UID: \"d7487242-27ca-4f15-8d7f-6a7cf67e8992\") " pod="openshift-image-registry/image-registry-697d97f7c8-wlnj9"
Nov 22 10:42:06 crc kubenswrapper[4926]: I1122 10:42:06.107352 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/46a7e15b-1a37-42ad-8e16-50681c5fccfc-config\") pod \"kube-controller-manager-operator-78b949d7b-kkqbv\" (UID: \"46a7e15b-1a37-42ad-8e16-50681c5fccfc\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-kkqbv"
Nov 22 10:42:06 crc kubenswrapper[4926]: I1122 10:42:06.107370 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/63ad9007-0aba-4183-95d0-d97f7034841d-serving-cert\") pod \"kube-apiserver-operator-766d6c64bb-27btn\" (UID: \"63ad9007-0aba-4183-95d0-d97f7034841d\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-27btn"
Nov 22 10:42:06 crc kubenswrapper[4926]: I1122 10:42:06.112719 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/55c9f174-5a49-4d37-9ce6-8640b252c9c2-profile-collector-cert\") pod \"catalog-operator-68c6474976-zmfpm\" (UID: \"55c9f174-5a49-4d37-9ce6-8640b252c9c2\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-zmfpm"
Nov 22 10:42:06 crc kubenswrapper[4926]: I1122 10:42:06.112808 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/4943246c-40df-4927-8380-b7d2804a17f7-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-cm8rb\" (UID: \"4943246c-40df-4927-8380-b7d2804a17f7\") " pod="openshift-marketplace/marketplace-operator-79b997595-cm8rb"
Nov 22 10:42:06 crc kubenswrapper[4926]: I1122 10:42:06.113172 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/46a7e15b-1a37-42ad-8e16-50681c5fccfc-serving-cert\") pod \"kube-controller-manager-operator-78b949d7b-kkqbv\" (UID: \"46a7e15b-1a37-42ad-8e16-50681c5fccfc\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-kkqbv"
Nov 22 10:42:06 crc kubenswrapper[4926]: I1122 10:42:06.113363 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-rwrgv"
Nov 22 10:42:06 crc kubenswrapper[4926]: I1122 10:42:06.114843 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/d7487242-27ca-4f15-8d7f-6a7cf67e8992-installation-pull-secrets\") pod \"image-registry-697d97f7c8-wlnj9\" (UID: \"d7487242-27ca-4f15-8d7f-6a7cf67e8992\") " pod="openshift-image-registry/image-registry-697d97f7c8-wlnj9"
Nov 22 10:42:06 crc kubenswrapper[4926]: I1122 10:42:06.116207 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/d7487242-27ca-4f15-8d7f-6a7cf67e8992-registry-tls\") pod \"image-registry-697d97f7c8-wlnj9\" (UID: \"d7487242-27ca-4f15-8d7f-6a7cf67e8992\") " pod="openshift-image-registry/image-registry-697d97f7c8-wlnj9"
Nov 22 10:42:06 crc kubenswrapper[4926]: I1122 10:42:06.131190 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/d7487242-27ca-4f15-8d7f-6a7cf67e8992-trusted-ca\") pod \"image-registry-697d97f7c8-wlnj9\" (UID: \"d7487242-27ca-4f15-8d7f-6a7cf67e8992\") " pod="openshift-image-registry/image-registry-697d97f7c8-wlnj9"
Nov 22 10:42:06 crc kubenswrapper[4926]: I1122 10:42:06.131408 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/d7487242-27ca-4f15-8d7f-6a7cf67e8992-registry-certificates\") pod \"image-registry-697d97f7c8-wlnj9\" (UID: \"d7487242-27ca-4f15-8d7f-6a7cf67e8992\") " pod="openshift-image-registry/image-registry-697d97f7c8-wlnj9"
Nov 22 10:42:06 crc kubenswrapper[4926]: I1122 10:42:06.132285 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/171f6f42-9983-401e-9aa1-1f1a7dfd412c-config\") pod \"kube-storage-version-migrator-operator-b67b599dd-pcmsx\" (UID: \"171f6f42-9983-401e-9aa1-1f1a7dfd412c\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-pcmsx"
Nov 22 10:42:06 crc kubenswrapper[4926]: I1122 10:42:06.132345 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-pzqp7"
Nov 22 10:42:06 crc kubenswrapper[4926]: I1122 10:42:06.132824 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/67768e9f-2f78-4189-ace5-1bedba6669a7-image-registry-operator-tls\") pod \"cluster-image-registry-operator-dc59b4c8b-j5lpl\" (UID: \"67768e9f-2f78-4189-ace5-1bedba6669a7\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-j5lpl"
Nov 22 10:42:06 crc kubenswrapper[4926]: I1122 10:42:06.133343 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/12a1d8f7-8b05-42ae-bba2-13bb443eae1e-default-certificate\") pod \"router-default-5444994796-8k4sd\" (UID: \"12a1d8f7-8b05-42ae-bba2-13bb443eae1e\") " pod="openshift-ingress/router-default-5444994796-8k4sd"
Nov 22 10:42:06 crc kubenswrapper[4926]: I1122 10:42:06.133695 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/12a1d8f7-8b05-42ae-bba2-13bb443eae1e-stats-auth\") pod \"router-default-5444994796-8k4sd\" (UID: \"12a1d8f7-8b05-42ae-bba2-13bb443eae1e\") " pod="openshift-ingress/router-default-5444994796-8k4sd"
Nov 22 10:42:06 crc kubenswrapper[4926]: I1122 10:42:06.134024 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/4943246c-40df-4927-8380-b7d2804a17f7-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-cm8rb\" (UID: \"4943246c-40df-4927-8380-b7d2804a17f7\") " pod="openshift-marketplace/marketplace-operator-79b997595-cm8rb"
Nov 22 10:42:06 crc kubenswrapper[4926]: I1122 10:42:06.135232 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/63ad9007-0aba-4183-95d0-d97f7034841d-config\") pod \"kube-apiserver-operator-766d6c64bb-27btn\" (UID: \"63ad9007-0aba-4183-95d0-d97f7034841d\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-27btn"
Nov 22 10:42:06 crc kubenswrapper[4926]: I1122 10:42:06.135301 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/55c9f174-5a49-4d37-9ce6-8640b252c9c2-srv-cert\") pod \"catalog-operator-68c6474976-zmfpm\" (UID: \"55c9f174-5a49-4d37-9ce6-8640b252c9c2\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-zmfpm"
Nov 22 10:42:06 crc kubenswrapper[4926]: I1122 10:42:06.135330 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zf2dd\" (UniqueName: \"kubernetes.io/projected/171f6f42-9983-401e-9aa1-1f1a7dfd412c-kube-api-access-zf2dd\") pod \"kube-storage-version-migrator-operator-b67b599dd-pcmsx\" (UID: \"171f6f42-9983-401e-9aa1-1f1a7dfd412c\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-pcmsx"
Nov 22 10:42:06 crc kubenswrapper[4926]: I1122 10:42:06.135384 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-785p5\" (UniqueName: \"kubernetes.io/projected/cb40abc0-74ba-42e9-bc0d-a4cdfc998421-kube-api-access-785p5\") pod \"machine-config-controller-84d6567774-t4dgw\" (UID: \"cb40abc0-74ba-42e9-bc0d-a4cdfc998421\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-t4dgw"
Nov 22 10:42:06 crc kubenswrapper[4926]: I1122 10:42:06.135411 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q57xd\" (UniqueName: \"kubernetes.io/projected/873a85f5-ab2c-4594-8968-5104458c4233-kube-api-access-q57xd\") pod \"dns-default-mwqpv\" (UID: \"873a85f5-ab2c-4594-8968-5104458c4233\") " pod="openshift-dns/dns-default-mwqpv"
Nov 22 10:42:06 crc kubenswrapper[4926]: I1122 10:42:06.142488 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/downloads-7954f5f757-8l5sv"]
Nov 22 10:42:06 crc kubenswrapper[4926]: I1122 10:42:06.165869 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console-operator/console-operator-58897d9998-zk2x8"]
Nov 22 10:42:06 crc kubenswrapper[4926]: I1122 10:42:06.143387 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/12a1d8f7-8b05-42ae-bba2-13bb443eae1e-metrics-certs\") pod \"router-default-5444994796-8k4sd\" (UID: \"12a1d8f7-8b05-42ae-bba2-13bb443eae1e\") " pod="openshift-ingress/router-default-5444994796-8k4sd"
Nov 22 10:42:06 crc kubenswrapper[4926]: I1122 10:42:06.146270 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/12a1d8f7-8b05-42ae-bba2-13bb443eae1e-metrics-certs\") pod \"router-default-5444994796-8k4sd\" (UID: \"12a1d8f7-8b05-42ae-bba2-13bb443eae1e\") " pod="openshift-ingress/router-default-5444994796-8k4sd"
Nov 22 10:42:06 crc kubenswrapper[4926]: I1122 10:42:06.158304 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/63ad9007-0aba-4183-95d0-d97f7034841d-serving-cert\") pod \"kube-apiserver-operator-766d6c64bb-27btn\" (UID: \"63ad9007-0aba-4183-95d0-d97f7034841d\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-27btn"
Nov 22 10:42:06 crc kubenswrapper[4926]: I1122 10:42:06.161929 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/cb40abc0-74ba-42e9-bc0d-a4cdfc998421-proxy-tls\") pod \"machine-config-controller-84d6567774-t4dgw\" (UID: \"cb40abc0-74ba-42e9-bc0d-a4cdfc998421\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-t4dgw"
Nov 22 10:42:06 crc kubenswrapper[4926]: I1122 10:42:06.163334 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/d7487242-27ca-4f15-8d7f-6a7cf67e8992-bound-sa-token\") pod \"image-registry-697d97f7c8-wlnj9\" (UID: \"d7487242-27ca-4f15-8d7f-6a7cf67e8992\") " pod="openshift-image-registry/image-registry-697d97f7c8-wlnj9"
Nov 22 10:42:06 crc kubenswrapper[4926]: I1122 10:42:06.165796 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/55c9f174-5a49-4d37-9ce6-8640b252c9c2-srv-cert\") pod \"catalog-operator-68c6474976-zmfpm\" (UID: \"55c9f174-5a49-4d37-9ce6-8640b252c9c2\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-zmfpm"
Nov 22 10:42:06 crc kubenswrapper[4926]: I1122 10:42:06.143235 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/63ad9007-0aba-4183-95d0-d97f7034841d-config\") pod \"kube-apiserver-operator-766d6c64bb-27btn\" (UID: \"63ad9007-0aba-4183-95d0-d97f7034841d\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-27btn"
Nov 22 10:42:06 crc kubenswrapper[4926]: I1122 10:42:06.166047 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/67768e9f-2f78-4189-ace5-1bedba6669a7-trusted-ca\") pod \"cluster-image-registry-operator-dc59b4c8b-j5lpl\" (UID: \"67768e9f-2f78-4189-ace5-1bedba6669a7\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-j5lpl"
Nov 22 10:42:06 crc kubenswrapper[4926]: I1122 10:42:06.166167 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/b95520b5-bae8-4409-9ae4-ad3763092f1c-package-server-manager-serving-cert\") pod \"package-server-manager-789f6589d5-h55df\" (UID: \"b95520b5-bae8-4409-9ae4-ad3763092f1c\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-h55df"
Nov 22 10:42:06 crc kubenswrapper[4926]: I1122 10:42:06.166195 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/46a7e15b-1a37-42ad-8e16-50681c5fccfc-kube-api-access\") pod \"kube-controller-manager-operator-78b949d7b-kkqbv\" (UID: \"46a7e15b-1a37-42ad-8e16-50681c5fccfc\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-kkqbv"
Nov 22 10:42:06 crc kubenswrapper[4926]: I1122 10:42:06.167592 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/67768e9f-2f78-4189-ace5-1bedba6669a7-trusted-ca\") pod \"cluster-image-registry-operator-dc59b4c8b-j5lpl\" (UID: \"67768e9f-2f78-4189-ace5-1bedba6669a7\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-j5lpl"
Nov 22 10:42:06 crc kubenswrapper[4926]: I1122 10:42:06.180946 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-st54q\" (UniqueName: \"kubernetes.io/projected/4943246c-40df-4927-8380-b7d2804a17f7-kube-api-access-st54q\") pod \"marketplace-operator-79b997595-cm8rb\" (UID: \"4943246c-40df-4927-8380-b7d2804a17f7\") " pod="openshift-marketplace/marketplace-operator-79b997595-cm8rb"
Nov 22 10:42:06 crc kubenswrapper[4926]: I1122 10:42:06.181052 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bgk58\" (UniqueName: \"kubernetes.io/projected/67768e9f-2f78-4189-ace5-1bedba6669a7-kube-api-access-bgk58\") pod \"cluster-image-registry-operator-dc59b4c8b-j5lpl\" (UID: \"67768e9f-2f78-4189-ace5-1bedba6669a7\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-j5lpl"
Nov 22 10:42:06 crc kubenswrapper[4926]: I1122 10:42:06.181355 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/171f6f42-9983-401e-9aa1-1f1a7dfd412c-serving-cert\") pod \"kube-storage-version-migrator-operator-b67b599dd-pcmsx\" (UID: \"171f6f42-9983-401e-9aa1-1f1a7dfd412c\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-pcmsx"
Nov 22 10:42:06 crc kubenswrapper[4926]: I1122 10:42:06.181806 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vsgpz\" (UniqueName: \"kubernetes.io/projected/b95520b5-bae8-4409-9ae4-ad3763092f1c-kube-api-access-vsgpz\") pod \"package-server-manager-789f6589d5-h55df\" (UID: \"b95520b5-bae8-4409-9ae4-ad3763092f1c\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-h55df"
Nov 22 10:42:06 crc kubenswrapper[4926]: I1122 10:42:06.181827 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4h9lr\" (UniqueName: \"kubernetes.io/projected/12a1d8f7-8b05-42ae-bba2-13bb443eae1e-kube-api-access-4h9lr\") pod \"router-default-5444994796-8k4sd\" (UID: \"12a1d8f7-8b05-42ae-bba2-13bb443eae1e\") " pod="openshift-ingress/router-default-5444994796-8k4sd"
Nov 22 10:42:06 crc kubenswrapper[4926]: I1122 10:42:06.181902 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/0fd98b4c-0217-4784-8bbd-b0ec0680a611-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-78cbb6b69f-rkbd7\" (UID: \"0fd98b4c-0217-4784-8bbd-b0ec0680a611\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-rkbd7"
Nov 22 10:42:06 crc kubenswrapper[4926]: I1122 10:42:06.181925 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/f6332434-11ab-46ab-8379-c056d2c292b5-secret-volume\") pod \"collect-profiles-29396790-vw7kn\" (UID: \"f6332434-11ab-46ab-8379-c056d2c292b5\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29396790-vw7kn"
Nov 22 10:42:06 crc kubenswrapper[4926]: I1122 10:42:06.182049 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-dir\" (UniqueName: \"kubernetes.io/host-path/5a5a2c07-33d5-4376-bee5-375341146a78-plugins-dir\") pod \"csi-hostpathplugin-p5j7r\" (UID: \"5a5a2c07-33d5-4376-bee5-375341146a78\") " pod="hostpath-provisioner/csi-hostpathplugin-p5j7r"
Nov 22 10:42:06 crc kubenswrapper[4926]: I1122 10:42:06.183611 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-cm8rb"
Nov 22 10:42:06 crc kubenswrapper[4926]: I1122 10:42:06.184641 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/b95520b5-bae8-4409-9ae4-ad3763092f1c-package-server-manager-serving-cert\") pod \"package-server-manager-789f6589d5-h55df\" (UID: \"b95520b5-bae8-4409-9ae4-ad3763092f1c\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-h55df"
Nov 22 10:42:06 crc kubenswrapper[4926]: I1122 10:42:06.188515 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/171f6f42-9983-401e-9aa1-1f1a7dfd412c-serving-cert\") pod \"kube-storage-version-migrator-operator-b67b599dd-pcmsx\" (UID: \"171f6f42-9983-401e-9aa1-1f1a7dfd412c\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-pcmsx"
Nov 22 10:42:06 crc kubenswrapper[4926]: I1122 10:42:06.188699 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/0fd98b4c-0217-4784-8bbd-b0ec0680a611-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-78cbb6b69f-rkbd7\" (UID: \"0fd98b4c-0217-4784-8bbd-b0ec0680a611\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-rkbd7"
Nov 22 10:42:06 crc kubenswrapper[4926]: I1122 10:42:06.196813 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/67768e9f-2f78-4189-ace5-1bedba6669a7-bound-sa-token\") pod \"cluster-image-registry-operator-dc59b4c8b-j5lpl\" (UID: \"67768e9f-2f78-4189-ace5-1bedba6669a7\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-j5lpl"
Nov 22 10:42:06 crc kubenswrapper[4926]: I1122 10:42:06.199007 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nm522\" (UniqueName: \"kubernetes.io/projected/d7487242-27ca-4f15-8d7f-6a7cf67e8992-kube-api-access-nm522\") pod \"image-registry-697d97f7c8-wlnj9\" (UID: \"d7487242-27ca-4f15-8d7f-6a7cf67e8992\") " pod="openshift-image-registry/image-registry-697d97f7c8-wlnj9"
Nov 22 10:42:06 crc kubenswrapper[4926]: I1122 10:42:06.224435 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hz2c5\" (UniqueName: \"kubernetes.io/projected/0fd98b4c-0217-4784-8bbd-b0ec0680a611-kube-api-access-hz2c5\") pod \"control-plane-machine-set-operator-78cbb6b69f-rkbd7\" (UID: \"0fd98b4c-0217-4784-8bbd-b0ec0680a611\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-rkbd7"
Nov 22 10:42:06 crc kubenswrapper[4926]: I1122 10:42:06.231122 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-g6m6g"]
Nov 22 10:42:06 crc kubenswrapper[4926]: I1122 10:42:06.231257 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-c56h5"]
Nov 22 10:42:06 crc kubenswrapper[4926]: I1122 10:42:06.240986 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fs727\" (UniqueName: \"kubernetes.io/projected/1200b370-1ba8-4366-bb7f-bbf8e0c7ce1b-kube-api-access-fs727\") pod \"migrator-59844c95c7-qkq57\" (UID: \"1200b370-1ba8-4366-bb7f-bbf8e0c7ce1b\") " pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-qkq57"
Nov 22 10:42:06 crc kubenswrapper[4926]: I1122 10:42:06.276193 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cnjv2\" (UniqueName: \"kubernetes.io/projected/55c9f174-5a49-4d37-9ce6-8640b252c9c2-kube-api-access-cnjv2\") pod \"catalog-operator-68c6474976-zmfpm\" (UID: \"55c9f174-5a49-4d37-9ce6-8640b252c9c2\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-zmfpm"
Nov 22 10:42:06 crc kubenswrapper[4926]: I1122 10:42:06.284563 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/63ad9007-0aba-4183-95d0-d97f7034841d-kube-api-access\") pod \"kube-apiserver-operator-766d6c64bb-27btn\" (UID: \"63ad9007-0aba-4183-95d0-d97f7034841d\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-27btn"
Nov 22 10:42:06 crc kubenswrapper[4926]: I1122 10:42:06.287611 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g6glh\" (UniqueName: \"kubernetes.io/projected/18cc7636-3262-4ea9-b0bc-23d29f6d2d2f-kube-api-access-g6glh\") pod \"ingress-canary-k24qp\" (UID: \"18cc7636-3262-4ea9-b0bc-23d29f6d2d2f\") " pod="openshift-ingress-canary/ingress-canary-k24qp"
Nov 22 10:42:06 crc kubenswrapper[4926]: I1122 10:42:06.287833 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-wlnj9\" (UID: \"d7487242-27ca-4f15-8d7f-6a7cf67e8992\") " pod="openshift-image-registry/image-registry-697d97f7c8-wlnj9"
Nov 22 10:42:06 crc kubenswrapper[4926]: I1122 10:42:06.288070 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"socket-dir\" (UniqueName: \"kubernetes.io/host-path/5a5a2c07-33d5-4376-bee5-375341146a78-socket-dir\") pod \"csi-hostpathplugin-p5j7r\" (UID: \"5a5a2c07-33d5-4376-bee5-375341146a78\") " pod="hostpath-provisioner/csi-hostpathplugin-p5j7r"
Nov 22 10:42:06 crc kubenswrapper[4926]: I1122 10:42:06.288173 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hmtzx\" (UniqueName: \"kubernetes.io/projected/286ef5c2-8cda-4b7e-868d-6fd126ef4845-kube-api-access-hmtzx\") pod \"machine-config-server-qhj5l\" (UID: \"286ef5c2-8cda-4b7e-868d-6fd126ef4845\") " pod="openshift-machine-config-operator/machine-config-server-qhj5l"
Nov 22 10:42:06 crc kubenswrapper[4926]: E1122 10:42:06.288301 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-22 10:42:06.788288505 +0000 UTC m=+147.089893792 (durationBeforeRetry 500ms).
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-wlnj9" (UID: "d7487242-27ca-4f15-8d7f-6a7cf67e8992") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 22 10:42:06 crc kubenswrapper[4926]: I1122 10:42:06.288545 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"csi-data-dir\" (UniqueName: \"kubernetes.io/host-path/5a5a2c07-33d5-4376-bee5-375341146a78-csi-data-dir\") pod \"csi-hostpathplugin-p5j7r\" (UID: \"5a5a2c07-33d5-4376-bee5-375341146a78\") " pod="hostpath-provisioner/csi-hostpathplugin-p5j7r"
Nov 22 10:42:06 crc kubenswrapper[4926]: I1122 10:42:06.288660 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/286ef5c2-8cda-4b7e-868d-6fd126ef4845-node-bootstrap-token\") pod \"machine-config-server-qhj5l\" (UID: \"286ef5c2-8cda-4b7e-868d-6fd126ef4845\") " pod="openshift-machine-config-operator/machine-config-server-qhj5l"
Nov 22 10:42:06 crc kubenswrapper[4926]: I1122 10:42:06.288922 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/18cc7636-3262-4ea9-b0bc-23d29f6d2d2f-cert\") pod \"ingress-canary-k24qp\" (UID: \"18cc7636-3262-4ea9-b0bc-23d29f6d2d2f\") " pod="openshift-ingress-canary/ingress-canary-k24qp"
Nov 22 10:42:06 crc kubenswrapper[4926]: I1122 10:42:06.289713 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/873a85f5-ab2c-4594-8968-5104458c4233-config-volume\") pod \"dns-default-mwqpv\" (UID: \"873a85f5-ab2c-4594-8968-5104458c4233\") " pod="openshift-dns/dns-default-mwqpv"
Nov 22 10:42:06 crc kubenswrapper[4926]: I1122 10:42:06.290203 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-d8hwz\" (UniqueName: \"kubernetes.io/projected/f6332434-11ab-46ab-8379-c056d2c292b5-kube-api-access-d8hwz\") pod \"collect-profiles-29396790-vw7kn\" (UID: \"f6332434-11ab-46ab-8379-c056d2c292b5\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29396790-vw7kn"
Nov 22 10:42:06 crc kubenswrapper[4926]: I1122 10:42:06.290336 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q57xd\" (UniqueName: \"kubernetes.io/projected/873a85f5-ab2c-4594-8968-5104458c4233-kube-api-access-q57xd\") pod \"dns-default-mwqpv\" (UID: \"873a85f5-ab2c-4594-8968-5104458c4233\") " pod="openshift-dns/dns-default-mwqpv"
Nov 22 10:42:06 crc kubenswrapper[4926]: I1122 10:42:06.290463 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/f6332434-11ab-46ab-8379-c056d2c292b5-secret-volume\") pod \"collect-profiles-29396790-vw7kn\" (UID: \"f6332434-11ab-46ab-8379-c056d2c292b5\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29396790-vw7kn"
Nov 22 10:42:06 crc kubenswrapper[4926]: I1122 10:42:06.290548 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-dir\" (UniqueName: \"kubernetes.io/host-path/5a5a2c07-33d5-4376-bee5-375341146a78-plugins-dir\") pod \"csi-hostpathplugin-p5j7r\" (UID: \"5a5a2c07-33d5-4376-bee5-375341146a78\") " pod="hostpath-provisioner/csi-hostpathplugin-p5j7r"
Nov 22 10:42:06 crc kubenswrapper[4926]: I1122 10:42:06.290619 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/873a85f5-ab2c-4594-8968-5104458c4233-config-volume\") pod \"dns-default-mwqpv\" (UID: \"873a85f5-ab2c-4594-8968-5104458c4233\") " pod="openshift-dns/dns-default-mwqpv"
Nov 22 10:42:06 crc kubenswrapper[4926]: I1122 10:42:06.289065 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"csi-data-dir\" (UniqueName: \"kubernetes.io/host-path/5a5a2c07-33d5-4376-bee5-375341146a78-csi-data-dir\") pod \"csi-hostpathplugin-p5j7r\" (UID: \"5a5a2c07-33d5-4376-bee5-375341146a78\") " pod="hostpath-provisioner/csi-hostpathplugin-p5j7r"
Nov 22 10:42:06 crc kubenswrapper[4926]: I1122 10:42:06.289173 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"socket-dir\" (UniqueName: \"kubernetes.io/host-path/5a5a2c07-33d5-4376-bee5-375341146a78-socket-dir\") pod \"csi-hostpathplugin-p5j7r\" (UID: \"5a5a2c07-33d5-4376-bee5-375341146a78\") " pod="hostpath-provisioner/csi-hostpathplugin-p5j7r"
Nov 22 10:42:06 crc kubenswrapper[4926]: I1122 10:42:06.290796 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-dir\" (UniqueName: \"kubernetes.io/host-path/5a5a2c07-33d5-4376-bee5-375341146a78-plugins-dir\") pod \"csi-hostpathplugin-p5j7r\" (UID: \"5a5a2c07-33d5-4376-bee5-375341146a78\") " pod="hostpath-provisioner/csi-hostpathplugin-p5j7r"
Nov 22 10:42:06 crc kubenswrapper[4926]: I1122 10:42:06.291267 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registration-dir\" (UniqueName: \"kubernetes.io/host-path/5a5a2c07-33d5-4376-bee5-375341146a78-registration-dir\") pod \"csi-hostpathplugin-p5j7r\" (UID: \"5a5a2c07-33d5-4376-bee5-375341146a78\") " pod="hostpath-provisioner/csi-hostpathplugin-p5j7r"
Nov 22 10:42:06 crc kubenswrapper[4926]: I1122 10:42:06.291298 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rrctf\" (UniqueName: \"kubernetes.io/projected/5a5a2c07-33d5-4376-bee5-375341146a78-kube-api-access-rrctf\") pod \"csi-hostpathplugin-p5j7r\" (UID: \"5a5a2c07-33d5-4376-bee5-375341146a78\") " pod="hostpath-provisioner/csi-hostpathplugin-p5j7r"
Nov 22 10:42:06 crc kubenswrapper[4926]: I1122 10:42:06.291335 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/286ef5c2-8cda-4b7e-868d-6fd126ef4845-certs\") pod \"machine-config-server-qhj5l\" (UID: \"286ef5c2-8cda-4b7e-868d-6fd126ef4845\") " pod="openshift-machine-config-operator/machine-config-server-qhj5l"
Nov 22 10:42:06 crc kubenswrapper[4926]: I1122 10:42:06.291359 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/873a85f5-ab2c-4594-8968-5104458c4233-metrics-tls\") pod \"dns-default-mwqpv\" (UID: \"873a85f5-ab2c-4594-8968-5104458c4233\") " pod="openshift-dns/dns-default-mwqpv"
Nov 22 10:42:06 crc kubenswrapper[4926]: I1122 10:42:06.291406 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"mountpoint-dir\" (UniqueName: \"kubernetes.io/host-path/5a5a2c07-33d5-4376-bee5-375341146a78-mountpoint-dir\") pod \"csi-hostpathplugin-p5j7r\" (UID: \"5a5a2c07-33d5-4376-bee5-375341146a78\") " pod="hostpath-provisioner/csi-hostpathplugin-p5j7r"
Nov 22 10:42:06 crc kubenswrapper[4926]: I1122 10:42:06.291424 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/f6332434-11ab-46ab-8379-c056d2c292b5-config-volume\") pod \"collect-profiles-29396790-vw7kn\" (UID: \"f6332434-11ab-46ab-8379-c056d2c292b5\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29396790-vw7kn"
Nov 22 10:42:06 crc kubenswrapper[4926]: I1122 10:42:06.292364 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registration-dir\" (UniqueName: \"kubernetes.io/host-path/5a5a2c07-33d5-4376-bee5-375341146a78-registration-dir\") pod \"csi-hostpathplugin-p5j7r\" (UID: \"5a5a2c07-33d5-4376-bee5-375341146a78\") " pod="hostpath-provisioner/csi-hostpathplugin-p5j7r"
Nov 22 10:42:06 crc kubenswrapper[4926]: I1122 10:42:06.292363 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/f6332434-11ab-46ab-8379-c056d2c292b5-config-volume\") pod \"collect-profiles-29396790-vw7kn\" (UID: \"f6332434-11ab-46ab-8379-c056d2c292b5\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29396790-vw7kn"
Nov 22 10:42:06 crc kubenswrapper[4926]: I1122 10:42:06.292419 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"mountpoint-dir\" (UniqueName: \"kubernetes.io/host-path/5a5a2c07-33d5-4376-bee5-375341146a78-mountpoint-dir\") pod \"csi-hostpathplugin-p5j7r\" (UID: \"5a5a2c07-33d5-4376-bee5-375341146a78\") " pod="hostpath-provisioner/csi-hostpathplugin-p5j7r"
Nov 22 10:42:06 crc kubenswrapper[4926]: I1122 10:42:06.296040 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/f6332434-11ab-46ab-8379-c056d2c292b5-secret-volume\") pod \"collect-profiles-29396790-vw7kn\" (UID: \"f6332434-11ab-46ab-8379-c056d2c292b5\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29396790-vw7kn"
Nov 22 10:42:06 crc kubenswrapper[4926]: I1122 10:42:06.300618 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/873a85f5-ab2c-4594-8968-5104458c4233-metrics-tls\") pod \"dns-default-mwqpv\" (UID: \"873a85f5-ab2c-4594-8968-5104458c4233\") " pod="openshift-dns/dns-default-mwqpv"
Nov 22 10:42:06 crc kubenswrapper[4926]: I1122 10:42:06.303125 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/18cc7636-3262-4ea9-b0bc-23d29f6d2d2f-cert\") pod \"ingress-canary-k24qp\" (UID: \"18cc7636-3262-4ea9-b0bc-23d29f6d2d2f\") " pod="openshift-ingress-canary/ingress-canary-k24qp"
Nov 22 10:42:06 crc kubenswrapper[4926]: I1122 10:42:06.305217 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/286ef5c2-8cda-4b7e-868d-6fd126ef4845-node-bootstrap-token\") pod \"machine-config-server-qhj5l\" (UID: \"286ef5c2-8cda-4b7e-868d-6fd126ef4845\") " pod="openshift-machine-config-operator/machine-config-server-qhj5l"
Nov 22 10:42:06 crc kubenswrapper[4926]: I1122 10:42:06.305875 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-27btn"
Nov 22 10:42:06 crc kubenswrapper[4926]: I1122 10:42:06.307101 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"certs\" (UniqueName: \"kubernetes.io/secret/286ef5c2-8cda-4b7e-868d-6fd126ef4845-certs\") pod \"machine-config-server-qhj5l\" (UID: \"286ef5c2-8cda-4b7e-868d-6fd126ef4845\") " pod="openshift-machine-config-operator/machine-config-server-qhj5l"
Nov 22 10:42:06 crc kubenswrapper[4926]: I1122 10:42:06.308300 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca/service-ca-9c57cc56f-2l9hr"]
Nov 22 10:42:06 crc kubenswrapper[4926]: I1122 10:42:06.326851 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zf2dd\" (UniqueName: \"kubernetes.io/projected/171f6f42-9983-401e-9aa1-1f1a7dfd412c-kube-api-access-zf2dd\") pod \"kube-storage-version-migrator-operator-b67b599dd-pcmsx\" (UID: \"171f6f42-9983-401e-9aa1-1f1a7dfd412c\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-pcmsx"
Nov 22 10:42:06 crc kubenswrapper[4926]: I1122 10:42:06.339628 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-785p5\" (UniqueName: \"kubernetes.io/projected/cb40abc0-74ba-42e9-bc0d-a4cdfc998421-kube-api-access-785p5\") pod \"machine-config-controller-84d6567774-t4dgw\" (UID: \"cb40abc0-74ba-42e9-bc0d-a4cdfc998421\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-t4dgw"
Nov 22 10:42:06 crc kubenswrapper[4926]: I1122 10:42:06.345373 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-rkbd7"
Nov 22 10:42:06 crc kubenswrapper[4926]: I1122 10:42:06.354982 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-qkq57"
Nov 22 10:42:06 crc kubenswrapper[4926]: I1122 10:42:06.356859 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns-operator/dns-operator-744455d44c-jb7x7"]
Nov 22 10:42:06 crc kubenswrapper[4926]: I1122 10:42:06.367014 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-brhnh"]
Nov 22 10:42:06 crc kubenswrapper[4926]: I1122 10:42:06.369615 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/46a7e15b-1a37-42ad-8e16-50681c5fccfc-kube-api-access\") pod \"kube-controller-manager-operator-78b949d7b-kkqbv\" (UID: \"46a7e15b-1a37-42ad-8e16-50681c5fccfc\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-kkqbv"
Nov 22 10:42:06 crc kubenswrapper[4926]: I1122 10:42:06.378524 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver/apiserver-76f77b778f-d4vh5"]
Nov 22 10:42:06 crc kubenswrapper[4926]: I1122 10:42:06.379257 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/multus-admission-controller-857f4d67dd-js7td"]
Nov 22 10:42:06 crc kubenswrapper[4926]: I1122 10:42:06.384041 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bgk58\" (UniqueName: \"kubernetes.io/projected/67768e9f-2f78-4189-ace5-1bedba6669a7-kube-api-access-bgk58\") pod \"cluster-image-registry-operator-dc59b4c8b-j5lpl\" (UID: \"67768e9f-2f78-4189-ace5-1bedba6669a7\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-j5lpl"
Nov 22 10:42:06 crc kubenswrapper[4926]: I1122 10:42:06.392263 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 22 10:42:06 crc kubenswrapper[4926]: E1122 10:42:06.392503 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 10:42:06.892471829 +0000 UTC m=+147.194077126 (durationBeforeRetry 500ms).
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 22 10:42:06 crc kubenswrapper[4926]: I1122 10:42:06.392788 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-wlnj9\" (UID: \"d7487242-27ca-4f15-8d7f-6a7cf67e8992\") " pod="openshift-image-registry/image-registry-697d97f7c8-wlnj9"
Nov 22 10:42:06 crc kubenswrapper[4926]: E1122 10:42:06.393326 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-22 10:42:06.893316873 +0000 UTC m=+147.194922160 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-wlnj9" (UID: "d7487242-27ca-4f15-8d7f-6a7cf67e8992") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 22 10:42:06 crc kubenswrapper[4926]: I1122 10:42:06.407005 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vsgpz\" (UniqueName: \"kubernetes.io/projected/b95520b5-bae8-4409-9ae4-ad3763092f1c-kube-api-access-vsgpz\") pod \"package-server-manager-789f6589d5-h55df\" (UID: \"b95520b5-bae8-4409-9ae4-ad3763092f1c\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-h55df"
Nov 22 10:42:06 crc kubenswrapper[4926]: I1122 10:42:06.423107 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-j5lpl"
Nov 22 10:42:06 crc kubenswrapper[4926]: I1122 10:42:06.426240 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-brhnh" event={"ID":"261fec63-db4b-4580-a128-3cc51da2cc93","Type":"ContainerStarted","Data":"c7ea9e03d629496410aff7874b231e9a0850b58410cdafeae6305ffe78aff1c4"}
Nov 22 10:42:06 crc kubenswrapper[4926]: I1122 10:42:06.426538 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4h9lr\" (UniqueName: \"kubernetes.io/projected/12a1d8f7-8b05-42ae-bba2-13bb443eae1e-kube-api-access-4h9lr\") pod \"router-default-5444994796-8k4sd\" (UID: \"12a1d8f7-8b05-42ae-bba2-13bb443eae1e\") " pod="openshift-ingress/router-default-5444994796-8k4sd"
Nov 22 10:42:06 crc kubenswrapper[4926]: I1122 10:42:06.433524 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-kkqbv"
Nov 22 10:42:06 crc kubenswrapper[4926]: I1122 10:42:06.442037 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-7954f5f757-8l5sv" event={"ID":"6e0f47bd-848d-4619-a2f9-eb503d04e2e0","Type":"ContainerStarted","Data":"76d53f4c68a733a4172501e2afe12125d1980a930757d8501be35b8c58ca07ff"}
Nov 22 10:42:06 crc kubenswrapper[4926]: I1122 10:42:06.444087 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-zmfpm"
Nov 22 10:42:06 crc kubenswrapper[4926]: I1122 10:42:06.447823 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca/service-ca-9c57cc56f-2l9hr" event={"ID":"32923a07-7dfd-47a6-9b84-6cf7ebd329fa","Type":"ContainerStarted","Data":"e4ac2c01699177bf22c2358e037da15a232ab5f55de62f91a88866cf11ff9a89"}
Nov 22 10:42:06 crc kubenswrapper[4926]: I1122 10:42:06.453320 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-pdkqk" event={"ID":"ce9a30da-be0f-49f1-8e0b-40bb1fba706a","Type":"ContainerStarted","Data":"2477155da5ed7e1348298f164923a548f3d685094db32f08c99b087ae44c14d2"}
Nov 22 10:42:06 crc kubenswrapper[4926]: I1122 10:42:06.453364 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-pdkqk" event={"ID":"ce9a30da-be0f-49f1-8e0b-40bb1fba706a","Type":"ContainerStarted","Data":"e6815e2ad1bfc8fb43500737563811d011fdad0525f7984c290f7fabfd20f42e"}
Nov 22 10:42:06 crc kubenswrapper[4926]: I1122 10:42:06.453698 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-pdkqk"
Nov 22 10:42:06 crc kubenswrapper[4926]: I1122 10:42:06.453741 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-5mlfw"]
Nov 22 10:42:06 crc kubenswrapper[4926]: I1122 10:42:06.455751 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console-operator/console-operator-58897d9998-zk2x8" event={"ID":"2d43d07f-3f14-4be9-9801-d40bda91eb2e","Type":"ContainerStarted","Data":"88a393ae7d318b2809cab27cae338f6064c61d1cd86bc37ecde23ff35c6faa86"}
Nov 22 10:42:06 crc kubenswrapper[4926]: I1122 10:42:06.456430 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-etcd-operator/etcd-operator-b45778765-9ph96"]
Nov 22 10:42:06 crc kubenswrapper[4926]: I1122 10:42:06.456789 4926 patch_prober.go:28] interesting pod/route-controller-manager-6576b87f9c-pdkqk container/route-controller-manager namespace/openshift-route-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.6:8443/healthz\": dial tcp 10.217.0.6:8443: connect: connection refused" start-of-body=
Nov 22 10:42:06 crc kubenswrapper[4926]: I1122 10:42:06.456837 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-pdkqk" podUID="ce9a30da-be0f-49f1-8e0b-40bb1fba706a" containerName="route-controller-manager" probeResult="failure" output="Get \"https://10.217.0.6:8443/healthz\": dial tcp 10.217.0.6:8443: connect: connection refused"
Nov 22 10:42:06 crc kubenswrapper[4926]: I1122 10:42:06.458700 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-c56h5" event={"ID":"fbac19dc-113c-44e6-8744-445e62ea540d","Type":"ContainerStarted","Data":"697f4a4ebd86df4136276139716a6ccac72daf864d0f7ba5fbfd73b98e382c1b"}
Nov 22 10:42:06 crc kubenswrapper[4926]: I1122 10:42:06.461402 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-rndxt"]
Nov 22 10:42:06 crc kubenswrapper[4926]: I1122 10:42:06.463990 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g6glh\" (UniqueName: \"kubernetes.io/projected/18cc7636-3262-4ea9-b0bc-23d29f6d2d2f-kube-api-access-g6glh\") pod \"ingress-canary-k24qp\" (UID: \"18cc7636-3262-4ea9-b0bc-23d29f6d2d2f\") " pod="openshift-ingress-canary/ingress-canary-k24qp"
Nov 22 10:42:06 crc kubenswrapper[4926]: I1122 10:42:06.465966 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-ztdwg" event={"ID":"f209060d-bbc4-4f07-82e2-1e8b212c1f56","Type":"ContainerStarted","Data":"e72e7de14f08553cb40cb3e5439c586df6b73b0f95a93cf90c5160b64183afb6"}
Nov 22 10:42:06 crc kubenswrapper[4926]: I1122 10:42:06.465998 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-ztdwg" event={"ID":"f209060d-bbc4-4f07-82e2-1e8b212c1f56","Type":"ContainerStarted","Data":"5ebd25bad188162a24f5031d36ee0b7e13091fb7d8f88e5708981b39de855ed0"}
Nov 22 10:42:06 crc kubenswrapper[4926]: I1122 10:42:06.470977 4926 generic.go:334] "Generic (PLEG): container finished" podID="c14868fd-4ccf-4779-81e8-dc3b30393f1f" containerID="c14fa7d9e45ac22aa1914e0dc641ddc60c79cbfd064d4dfcc39520d4512dc22a" exitCode=0
Nov 22 10:42:06 crc kubenswrapper[4926]: I1122 10:42:06.471035 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-954dx" event={"ID":"c14868fd-4ccf-4779-81e8-dc3b30393f1f","Type":"ContainerDied","Data":"c14fa7d9e45ac22aa1914e0dc641ddc60c79cbfd064d4dfcc39520d4512dc22a"}
Nov 22 10:42:06 crc kubenswrapper[4926]: I1122 10:42:06.471059 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-954dx" event={"ID":"c14868fd-4ccf-4779-81e8-dc3b30393f1f","Type":"ContainerStarted","Data":"e48c805dd06d522424c9a10359ca4283db907f3d7d7066b28ac894f3cd08b760"}
Nov 22 10:42:06 crc kubenswrapper[4926]: I1122 10:42:06.471661 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress/router-default-5444994796-8k4sd"
Nov 22 10:42:06 crc kubenswrapper[4926]: I1122 10:42:06.474236 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-g6m6g" event={"ID":"62b979b0-adbc-4c29-a08d-55bb2d07fc6c","Type":"ContainerStarted","Data":"5a93a4185a03ebe14af53da003ad356cd9d77a7b11199b450245ff54434552da"}
Nov 22 10:42:06 crc kubenswrapper[4926]: I1122 10:42:06.478714 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication-operator/authentication-operator-69f744f599-wvt6w" event={"ID":"0dc0778b-7907-42c7-a656-22c517a50c3a","Type":"ContainerStarted","Data":"fb555492e9f844052f5832e50f8c7af533b6b0b9c80c79b89a9a0ed4be7faca3"}
Nov 22 10:42:06 crc kubenswrapper[4926]: I1122 10:42:06.478753 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication-operator/authentication-operator-69f744f599-wvt6w" event={"ID":"0dc0778b-7907-42c7-a656-22c517a50c3a","Type":"ContainerStarted","Data":"ec8a34be789389a9e829fd783fba32089896ffed8c7bda6b7f4e3d9657b202ad"}
Nov 22 10:42:06 crc kubenswrapper[4926]: I1122 10:42:06.479618 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hmtzx\" (UniqueName: \"kubernetes.io/projected/286ef5c2-8cda-4b7e-868d-6fd126ef4845-kube-api-access-hmtzx\") pod \"machine-config-server-qhj5l\" (UID: \"286ef5c2-8cda-4b7e-868d-6fd126ef4845\") " pod="openshift-machine-config-operator/machine-config-server-qhj5l"
Nov 22 10:42:06 crc kubenswrapper[4926]: I1122 10:42:06.479872 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-h55df"
Nov 22 10:42:06 crc kubenswrapper[4926]: I1122 10:42:06.481518 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-nsj2w" event={"ID":"ae977eb4-8273-4dab-9e39-80c36ccd63e2","Type":"ContainerStarted","Data":"cb7a55cbc2e4bc4cdf65ef747993bfb4d39e1d913f9c5d25263a88a4044fdfff"}
Nov 22 10:42:06 crc kubenswrapper[4926]: I1122 10:42:06.481554 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-nsj2w" event={"ID":"ae977eb4-8273-4dab-9e39-80c36ccd63e2","Type":"ContainerStarted","Data":"dfc574c382b4bbf023e7b4df1fa5c865f3cec91fbb1e5daf5222d7d7e9d1ba47"}
Nov 22 10:42:06 crc kubenswrapper[4926]: I1122 10:42:06.483487 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-7zp2q" event={"ID":"831547cf-3937-4d20-83b8-9570e309f0b3","Type":"ContainerStarted","Data":"0a54ef2695f173644f09f0124c7a91ee96369c12f066784ece97f1d4f17ccaa0"}
Nov 22 10:42:06 crc kubenswrapper[4926]: I1122 10:42:06.489161 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-944rj" event={"ID":"dac98899-4c89-48b4-bf2d-0aee1eb95eff","Type":"ContainerStarted","Data":"a2a56f8ca4a48e3b8ad1f9f2cb1da3d3e2d9ac45f3afe7d3b2b4c7b5fb5ad3bf"}
Nov 22 10:42:06 crc kubenswrapper[4926]: I1122 10:42:06.489212 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-944rj" event={"ID":"dac98899-4c89-48b4-bf2d-0aee1eb95eff","Type":"ContainerStarted","Data":"f41439c3a9157304b4ee70780cf068454bb07d42fe995c5da53498e6ccab660e"}
Nov 22 10:42:06 crc kubenswrapper[4926]: I1122 10:42:06.495925 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 22 10:42:06 crc kubenswrapper[4926]: I1122 10:42:06.497254 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-q57xd\" (UniqueName: \"kubernetes.io/projected/873a85f5-ab2c-4594-8968-5104458c4233-kube-api-access-q57xd\") pod \"dns-default-mwqpv\" (UID: \"873a85f5-ab2c-4594-8968-5104458c4233\") " pod="openshift-dns/dns-default-mwqpv"
Nov 22 10:42:06 crc kubenswrapper[4926]: E1122 10:42:06.497278 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 10:42:06.99725283 +0000 UTC m=+147.298858117 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 22 10:42:06 crc kubenswrapper[4926]: W1122 10:42:06.501706 4926 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podc44b98ae_cd26_48e0_9c72_725fd64d22f9.slice/crio-67da05e0080e7360cf8ebf2e56ab8e0cefd81b8e9e30a4537448e8d0de450701 WatchSource:0}: Error finding container 67da05e0080e7360cf8ebf2e56ab8e0cefd81b8e9e30a4537448e8d0de450701: Status 404 returned error can't find the container with id 67da05e0080e7360cf8ebf2e56ab8e0cefd81b8e9e30a4537448e8d0de450701
Nov 22 10:42:06 crc kubenswrapper[4926]: I1122 10:42:06.518806 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-d8hwz\" (UniqueName: \"kubernetes.io/projected/f6332434-11ab-46ab-8379-c056d2c292b5-kube-api-access-d8hwz\") pod \"collect-profiles-29396790-vw7kn\" (UID: \"f6332434-11ab-46ab-8379-c056d2c292b5\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29396790-vw7kn"
Nov 22 10:42:06 crc kubenswrapper[4926]: W1122 10:42:06.524521 4926 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod40a4057a_5a42_463f_aeb2_995754abca81.slice/crio-9a0d4da54121b332880e6ca0e4dd2c96710406283ea745c378ae9ab48e9980ff WatchSource:0}: Error finding container 9a0d4da54121b332880e6ca0e4dd2c96710406283ea745c378ae9ab48e9980ff: Status 404 returned error can't find the container with id 9a0d4da54121b332880e6ca0e4dd2c96710406283ea745c378ae9ab48e9980ff
Nov 22 10:42:06 crc kubenswrapper[4926]: I1122 10:42:06.530593 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-k24qp"
Nov 22 10:42:06 crc kubenswrapper[4926]: I1122 10:42:06.537755 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-server-qhj5l"
Nov 22 10:42:06 crc kubenswrapper[4926]: I1122 10:42:06.542976 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-mwqpv"
Nov 22 10:42:06 crc kubenswrapper[4926]: I1122 10:42:06.545112 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rrctf\" (UniqueName: \"kubernetes.io/projected/5a5a2c07-33d5-4376-bee5-375341146a78-kube-api-access-rrctf\") pod \"csi-hostpathplugin-p5j7r\" (UID: \"5a5a2c07-33d5-4376-bee5-375341146a78\") " pod="hostpath-provisioner/csi-hostpathplugin-p5j7r"
Nov 22 10:42:06 crc kubenswrapper[4926]: W1122 10:42:06.556584 4926 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod9464da92_3307_4d40_8643_133ecf84d523.slice/crio-95fc1d02e1d876959cfc2e402db7c4b165c22287ff8531490a7fcd0ced378581 WatchSource:0}: Error finding container 95fc1d02e1d876959cfc2e402db7c4b165c22287ff8531490a7fcd0ced378581: Status 404 returned error can't find the container with id 95fc1d02e1d876959cfc2e402db7c4b165c22287ff8531490a7fcd0ced378581
Nov 22 10:42:06 crc kubenswrapper[4926]: I1122 10:42:06.597045 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-wlnj9\" (UID: \"d7487242-27ca-4f15-8d7f-6a7cf67e8992\") " pod="openshift-image-registry/image-registry-697d97f7c8-wlnj9"
Nov 22 10:42:06 crc kubenswrapper[4926]: E1122 10:42:06.599292 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-22 10:42:07.099276622 +0000 UTC m=+147.400881909 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-wlnj9" (UID: "d7487242-27ca-4f15-8d7f-6a7cf67e8992") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 22 10:42:06 crc kubenswrapper[4926]: I1122 10:42:06.621261 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-dl4z4"]
Nov 22 10:42:06 crc kubenswrapper[4926]: I1122 10:42:06.628029 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/machine-api-operator-5694c8668f-jkqsx"]
Nov 22 10:42:06 crc kubenswrapper[4926]: I1122 10:42:06.621535 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-pcmsx"
Nov 22 10:42:06 crc kubenswrapper[4926]: I1122 10:42:06.639761 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-t4dgw" Nov 22 10:42:06 crc kubenswrapper[4926]: I1122 10:42:06.695454 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-operator/ingress-operator-5b745b69d9-wlvg2"] Nov 22 10:42:06 crc kubenswrapper[4926]: I1122 10:42:06.698783 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 22 10:42:06 crc kubenswrapper[4926]: E1122 10:42:06.698937 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 10:42:07.198917316 +0000 UTC m=+147.500522603 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 10:42:06 crc kubenswrapper[4926]: I1122 10:42:06.699070 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-wlnj9\" (UID: \"d7487242-27ca-4f15-8d7f-6a7cf67e8992\") " pod="openshift-image-registry/image-registry-697d97f7c8-wlnj9" Nov 22 10:42:06 crc kubenswrapper[4926]: E1122 10:42:06.699412 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-22 10:42:07.19940073 +0000 UTC m=+147.501006017 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-wlnj9" (UID: "d7487242-27ca-4f15-8d7f-6a7cf67e8992") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 10:42:06 crc kubenswrapper[4926]: I1122 10:42:06.707406 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca-operator/service-ca-operator-777779d784-dcbsr"] Nov 22 10:42:06 crc kubenswrapper[4926]: I1122 10:42:06.803742 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29396790-vw7kn" Nov 22 10:42:06 crc kubenswrapper[4926]: I1122 10:42:06.804186 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 22 10:42:06 crc kubenswrapper[4926]: E1122 10:42:06.804745 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 10:42:07.304724697 +0000 UTC m=+147.606329984 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 10:42:06 crc kubenswrapper[4926]: I1122 10:42:06.827644 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-p5j7r" Nov 22 10:42:06 crc kubenswrapper[4926]: I1122 10:42:06.869746 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-rkbd7"] Nov 22 10:42:06 crc kubenswrapper[4926]: I1122 10:42:06.871514 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-j5lpl"] Nov 22 10:42:06 crc kubenswrapper[4926]: I1122 10:42:06.880737 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-operator-74547568cd-pzqp7"] Nov 22 10:42:06 crc kubenswrapper[4926]: I1122 10:42:06.881929 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-rwrgv"] Nov 22 10:42:06 crc kubenswrapper[4926]: I1122 10:42:06.883707 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-cm8rb"] Nov 22 10:42:06 crc kubenswrapper[4926]: I1122 10:42:06.896493 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator/migrator-59844c95c7-qkq57"] Nov 22 10:42:06 crc kubenswrapper[4926]: I1122 10:42:06.906693 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-wlnj9\" (UID: \"d7487242-27ca-4f15-8d7f-6a7cf67e8992\") " pod="openshift-image-registry/image-registry-697d97f7c8-wlnj9" Nov 22 10:42:06 crc kubenswrapper[4926]: E1122 10:42:06.907119 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-22 10:42:07.407099319 +0000 UTC m=+147.708704676 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-wlnj9" (UID: "d7487242-27ca-4f15-8d7f-6a7cf67e8992") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 10:42:06 crc kubenswrapper[4926]: I1122 10:42:06.968484 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/catalog-operator-68c6474976-zmfpm"] Nov 22 10:42:06 crc kubenswrapper[4926]: I1122 10:42:06.985833 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-27btn"] Nov 22 10:42:07 crc kubenswrapper[4926]: I1122 10:42:07.009146 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 22 10:42:07 crc kubenswrapper[4926]: E1122 10:42:07.009979 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 10:42:07.509963265 +0000 UTC m=+147.811568542 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 10:42:07 crc kubenswrapper[4926]: I1122 10:42:07.028745 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-kkqbv"] Nov 22 10:42:07 crc kubenswrapper[4926]: W1122 10:42:07.043584 4926 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3711dcd6_7ec3_4610_857d_e24f38c6e986.slice/crio-61ddc53fc7d70ad7cec710d2547600bf430d939d04bcde045f91f5facd7c1597 WatchSource:0}: Error finding container 61ddc53fc7d70ad7cec710d2547600bf430d939d04bcde045f91f5facd7c1597: Status 404 returned error can't find the container with id 61ddc53fc7d70ad7cec710d2547600bf430d939d04bcde045f91f5facd7c1597 Nov 22 10:42:07 crc kubenswrapper[4926]: I1122 10:42:07.110701 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-wlnj9\" (UID: \"d7487242-27ca-4f15-8d7f-6a7cf67e8992\") " pod="openshift-image-registry/image-registry-697d97f7c8-wlnj9" Nov 22 10:42:07 crc kubenswrapper[4926]: E1122 10:42:07.110972 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. 
No retries permitted until 2025-11-22 10:42:07.610961188 +0000 UTC m=+147.912566475 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-wlnj9" (UID: "d7487242-27ca-4f15-8d7f-6a7cf67e8992") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 10:42:07 crc kubenswrapper[4926]: I1122 10:42:07.185722 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-h55df"] Nov 22 10:42:07 crc kubenswrapper[4926]: I1122 10:42:07.214767 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 22 10:42:07 crc kubenswrapper[4926]: E1122 10:42:07.215781 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 10:42:07.715765049 +0000 UTC m=+148.017370336 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 10:42:07 crc kubenswrapper[4926]: W1122 10:42:07.255678 4926 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb95520b5_bae8_4409_9ae4_ad3763092f1c.slice/crio-1a1321b3fcbe9abac62d7df7c8d104720fe4839a06aab4ce80754d4c6844912d WatchSource:0}: Error finding container 1a1321b3fcbe9abac62d7df7c8d104720fe4839a06aab4ce80754d4c6844912d: Status 404 returned error can't find the container with id 1a1321b3fcbe9abac62d7df7c8d104720fe4839a06aab4ce80754d4c6844912d Nov 22 10:42:07 crc kubenswrapper[4926]: I1122 10:42:07.316688 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-wlnj9\" (UID: \"d7487242-27ca-4f15-8d7f-6a7cf67e8992\") " pod="openshift-image-registry/image-registry-697d97f7c8-wlnj9" Nov 22 10:42:07 crc kubenswrapper[4926]: E1122 10:42:07.317222 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-22 10:42:07.817206395 +0000 UTC m=+148.118811692 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-wlnj9" (UID: "d7487242-27ca-4f15-8d7f-6a7cf67e8992") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 10:42:07 crc kubenswrapper[4926]: I1122 10:42:07.348704 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-controller-84d6567774-t4dgw"] Nov 22 10:42:07 crc kubenswrapper[4926]: I1122 10:42:07.417774 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 22 10:42:07 crc kubenswrapper[4926]: E1122 10:42:07.417870 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 10:42:07.917853197 +0000 UTC m=+148.219458484 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 10:42:07 crc kubenswrapper[4926]: I1122 10:42:07.418250 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-wlnj9\" (UID: \"d7487242-27ca-4f15-8d7f-6a7cf67e8992\") " pod="openshift-image-registry/image-registry-697d97f7c8-wlnj9" Nov 22 10:42:07 crc kubenswrapper[4926]: E1122 10:42:07.418856 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-22 10:42:07.918836986 +0000 UTC m=+148.220442273 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-wlnj9" (UID: "d7487242-27ca-4f15-8d7f-6a7cf67e8992") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 10:42:07 crc kubenswrapper[4926]: I1122 10:42:07.459693 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-pcmsx"] Nov 22 10:42:07 crc kubenswrapper[4926]: I1122 10:42:07.465307 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns/dns-default-mwqpv"] Nov 22 10:42:07 crc kubenswrapper[4926]: I1122 10:42:07.481504 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-canary/ingress-canary-k24qp"] Nov 22 10:42:07 crc kubenswrapper[4926]: I1122 10:42:07.513740 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-rwrgv" event={"ID":"3711dcd6-7ec3-4610-857d-e24f38c6e986","Type":"ContainerStarted","Data":"61ddc53fc7d70ad7cec710d2547600bf430d939d04bcde045f91f5facd7c1597"} Nov 22 10:42:07 crc kubenswrapper[4926]: I1122 10:42:07.519414 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 22 10:42:07 crc kubenswrapper[4926]: E1122 10:42:07.519604 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 10:42:08.019576201 +0000 UTC m=+148.321181498 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 10:42:07 crc kubenswrapper[4926]: I1122 10:42:07.519654 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-wlnj9\" (UID: \"d7487242-27ca-4f15-8d7f-6a7cf67e8992\") " pod="openshift-image-registry/image-registry-697d97f7c8-wlnj9" Nov 22 10:42:07 crc kubenswrapper[4926]: E1122 10:42:07.520234 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-22 10:42:08.020219339 +0000 UTC m=+148.321824626 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-wlnj9" (UID: "d7487242-27ca-4f15-8d7f-6a7cf67e8992") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 10:42:07 crc kubenswrapper[4926]: I1122 10:42:07.523811 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29396790-vw7kn"] Nov 22 10:42:07 crc kubenswrapper[4926]: I1122 10:42:07.524510 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-pzqp7" event={"ID":"05be3235-93e8-4d5c-8f34-9c47f694bb1b","Type":"ContainerStarted","Data":"411a753bcf1e6c04d47b82d9a06601aa0211da947ac9f8977728d3abc99efbeb"} Nov 22 10:42:07 crc kubenswrapper[4926]: I1122 10:42:07.524559 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-pzqp7" event={"ID":"05be3235-93e8-4d5c-8f34-9c47f694bb1b","Type":"ContainerStarted","Data":"206233c755aaabfbb14f3b3ea9a400145da22f15bd646d3fef550969cd4679ae"} Nov 22 10:42:07 crc kubenswrapper[4926]: I1122 10:42:07.530688 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-zmfpm" event={"ID":"55c9f174-5a49-4d37-9ce6-8640b252c9c2","Type":"ContainerStarted","Data":"76cd8504accbf65dfd791b8a4f3a02b8bd9085253617d6fa8a750ea18414fb61"} Nov 22 10:42:07 crc kubenswrapper[4926]: I1122 10:42:07.532262 4926 generic.go:334] "Generic (PLEG): container finished" podID="c44b98ae-cd26-48e0-9c72-725fd64d22f9" containerID="5817cfc216a5c7a4f6812c5614699df3e12fadb2b2819e67a81be3f2bdd0850f" exitCode=0 Nov 22 10:42:07 crc kubenswrapper[4926]: I1122 10:42:07.532363 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-d4vh5" event={"ID":"c44b98ae-cd26-48e0-9c72-725fd64d22f9","Type":"ContainerDied","Data":"5817cfc216a5c7a4f6812c5614699df3e12fadb2b2819e67a81be3f2bdd0850f"} Nov 22 10:42:07 crc kubenswrapper[4926]: I1122 10:42:07.532424 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-d4vh5" event={"ID":"c44b98ae-cd26-48e0-9c72-725fd64d22f9","Type":"ContainerStarted","Data":"67da05e0080e7360cf8ebf2e56ab8e0cefd81b8e9e30a4537448e8d0de450701"} Nov 22 10:42:07 crc kubenswrapper[4926]: I1122 10:42:07.533437 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress/router-default-5444994796-8k4sd" event={"ID":"12a1d8f7-8b05-42ae-bba2-13bb443eae1e","Type":"ContainerStarted","Data":"c805dbfb2f8c37f6a0207040afb8e38199d47ad4bbee3008da187a27bca7f038"} Nov 22 10:42:07 crc kubenswrapper[4926]: I1122 10:42:07.534553 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-brhnh" event={"ID":"261fec63-db4b-4580-a128-3cc51da2cc93","Type":"ContainerStarted","Data":"bbc872f0466e9661863f987d6dadc2869fd2bd868a1ebb8e25c4fd9217c89e09"} Nov 22 10:42:07 crc kubenswrapper[4926]: I1122 10:42:07.534762 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-brhnh" Nov 22 10:42:07 crc kubenswrapper[4926]: I1122 10:42:07.536951 4926 
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-cm8rb" event={"ID":"4943246c-40df-4927-8380-b7d2804a17f7","Type":"ContainerStarted","Data":"393815e471efcaa36bedf893a46f88aabbf3f11a3a275a9986f752c4d3e48211"} Nov 22 10:42:07 crc kubenswrapper[4926]: I1122 10:42:07.538323 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-h55df" event={"ID":"b95520b5-bae8-4409-9ae4-ad3763092f1c","Type":"ContainerStarted","Data":"1a1321b3fcbe9abac62d7df7c8d104720fe4839a06aab4ce80754d4c6844912d"} Nov 22 10:42:07 crc kubenswrapper[4926]: I1122 10:42:07.538393 4926 patch_prober.go:28] interesting pod/olm-operator-6b444d44fb-brhnh container/olm-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.22:8443/healthz\": dial tcp 10.217.0.22:8443: connect: connection refused" start-of-body= Nov 22 10:42:07 crc kubenswrapper[4926]: I1122 10:42:07.538441 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-brhnh" podUID="261fec63-db4b-4580-a128-3cc51da2cc93" containerName="olm-operator" probeResult="failure" output="Get \"https://10.217.0.22:8443/healthz\": dial tcp 10.217.0.22:8443: connect: connection refused" Nov 22 10:42:07 crc kubenswrapper[4926]: I1122 10:42:07.539461 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-27btn" event={"ID":"63ad9007-0aba-4183-95d0-d97f7034841d","Type":"ContainerStarted","Data":"86dc216c4f2a4ac3d8b54b4b72d06783bee994ac924bde20946a9337845e4cd1"} Nov 22 10:42:07 crc kubenswrapper[4926]: I1122 10:42:07.542244 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-ztdwg" event={"ID":"f209060d-bbc4-4f07-82e2-1e8b212c1f56","Type":"ContainerStarted","Data":"cc6d7116acc62c3bad6882b6fe378b9e615b6336725a595641eb565a1a72357b"} Nov 22 10:42:07 crc kubenswrapper[4926]: I1122 10:42:07.566777 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["hostpath-provisioner/csi-hostpathplugin-p5j7r"] Nov 22 10:42:07 crc kubenswrapper[4926]: I1122 10:42:07.568665 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-c56h5" event={"ID":"fbac19dc-113c-44e6-8744-445e62ea540d","Type":"ContainerStarted","Data":"7d2a23f72bda1f20c8bbb73708aa680bd01b6779943dc1b6b4626f6187b74635"} Nov 22 10:42:07 crc kubenswrapper[4926]: I1122 10:42:07.569289 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-879f6c89f-c56h5" Nov 22 10:42:07 crc kubenswrapper[4926]: I1122 10:42:07.570351 4926 patch_prober.go:28] interesting pod/controller-manager-879f6c89f-c56h5 container/controller-manager namespace/openshift-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.12:8443/healthz\": dial tcp 10.217.0.12:8443: connect: connection refused" start-of-body= Nov 22 10:42:07 crc kubenswrapper[4926]: I1122 10:42:07.570392 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-controller-manager/controller-manager-879f6c89f-c56h5" podUID="fbac19dc-113c-44e6-8744-445e62ea540d" containerName="controller-manager" probeResult="failure" output="Get \"https://10.217.0.12:8443/healthz\": dial tcp 10.217.0.12:8443: connect: connection refused" Nov 22 
10:42:07 crc kubenswrapper[4926]: I1122 10:42:07.571108 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns-operator/dns-operator-744455d44c-jb7x7" event={"ID":"731bcd4c-12b4-408c-a30b-dd7ccd6a0712","Type":"ContainerStarted","Data":"18408e12ee1284e7e67a7ed85ca531df4cbb17445abbc5d956142fe61d1116c3"} Nov 22 10:42:07 crc kubenswrapper[4926]: I1122 10:42:07.571136 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns-operator/dns-operator-744455d44c-jb7x7" event={"ID":"731bcd4c-12b4-408c-a30b-dd7ccd6a0712","Type":"ContainerStarted","Data":"385df45c636c33d8ab233c8a9691a2086995f19501a205e22d9656f32e92448f"} Nov 22 10:42:07 crc kubenswrapper[4926]: I1122 10:42:07.578655 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-g6m6g" event={"ID":"62b979b0-adbc-4c29-a08d-55bb2d07fc6c","Type":"ContainerStarted","Data":"629f4420a1b6991950139f29c55fbc6c0627afc956be31202fd3f0cffeacd566"} Nov 22 10:42:07 crc kubenswrapper[4926]: I1122 10:42:07.578913 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-authentication/oauth-openshift-558db77b4-g6m6g" Nov 22 10:42:07 crc kubenswrapper[4926]: I1122 10:42:07.581471 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-954dx" event={"ID":"c14868fd-4ccf-4779-81e8-dc3b30393f1f","Type":"ContainerStarted","Data":"69c786b397adf58c9203e4bc89b6209035aeb97ad587dbeded090cc94528deb8"} Nov 22 10:42:07 crc kubenswrapper[4926]: I1122 10:42:07.581840 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-config-operator/openshift-config-operator-7777fb866f-954dx" Nov 22 10:42:07 crc kubenswrapper[4926]: I1122 10:42:07.581948 4926 patch_prober.go:28] interesting pod/oauth-openshift-558db77b4-g6m6g container/oauth-openshift namespace/openshift-authentication: Readiness probe status=failure output="Get \"https://10.217.0.10:6443/healthz\": dial tcp 10.217.0.10:6443: connect: connection refused" start-of-body= Nov 22 10:42:07 crc kubenswrapper[4926]: I1122 10:42:07.581973 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-authentication/oauth-openshift-558db77b4-g6m6g" podUID="62b979b0-adbc-4c29-a08d-55bb2d07fc6c" containerName="oauth-openshift" probeResult="failure" output="Get \"https://10.217.0.10:6443/healthz\": dial tcp 10.217.0.10:6443: connect: connection refused" Nov 22 10:42:07 crc kubenswrapper[4926]: W1122 10:42:07.584069 4926 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod171f6f42_9983_401e_9aa1_1f1a7dfd412c.slice/crio-58ab4e41bc86bfbcdfe44a53479d1818046b613aa92b720fb307acc615f8c495 WatchSource:0}: Error finding container 58ab4e41bc86bfbcdfe44a53479d1818046b613aa92b720fb307acc615f8c495: Status 404 returned error can't find the container with id 58ab4e41bc86bfbcdfe44a53479d1818046b613aa92b720fb307acc615f8c495 Nov 22 10:42:07 crc kubenswrapper[4926]: I1122 10:42:07.586315 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-dl4z4" event={"ID":"e7bb5a83-ce79-42db-a6d0-996b1405d668","Type":"ContainerStarted","Data":"a61bdbdf8c87765d74609e84309b000158c0f7f927b78c87b3431b2f9d3b555c"} Nov 22 10:42:07 crc kubenswrapper[4926]: I1122 10:42:07.586384 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-dl4z4" event={"ID":"e7bb5a83-ce79-42db-a6d0-996b1405d668","Type":"ContainerStarted","Data":"918df2a736dbe254f433b8b882c841dfcee22cb5a48b733a39e53536e9fa908b"} Nov 22 10:42:07 crc kubenswrapper[4926]: I1122 10:42:07.587480 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-kkqbv" event={"ID":"46a7e15b-1a37-42ad-8e16-50681c5fccfc","Type":"ContainerStarted","Data":"fd44f7eccf65f6df90f39fa400c33d3e49b9df82f3d55f7c4510f6aefeeee8a3"} Nov 22 10:42:07 crc kubenswrapper[4926]: I1122 10:42:07.588541 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-j5lpl" event={"ID":"67768e9f-2f78-4189-ace5-1bedba6669a7","Type":"ContainerStarted","Data":"17e38003b61615333f542a0e7bf1688852da22cdc3deccbe0bb2444abe5b41ed"} Nov 22 10:42:07 crc kubenswrapper[4926]: I1122 10:42:07.590707 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-7954f5f757-8l5sv" event={"ID":"6e0f47bd-848d-4619-a2f9-eb503d04e2e0","Type":"ContainerStarted","Data":"4808990dd4c5d9d82845666b187d49bdb8e2017d77bdcb7f8436e88a90e530df"} Nov 22 10:42:07 crc kubenswrapper[4926]: I1122 10:42:07.590870 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/downloads-7954f5f757-8l5sv" Nov 22 10:42:07 crc kubenswrapper[4926]: I1122 10:42:07.592246 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-rkbd7" event={"ID":"0fd98b4c-0217-4784-8bbd-b0ec0680a611","Type":"ContainerStarted","Data":"c2587fa255dadcf6a661c1c2318d136a24480abcd0017729e852b68a7d1a1b85"} Nov 22 10:42:07 crc kubenswrapper[4926]: I1122 10:42:07.592557 4926 patch_prober.go:28] interesting pod/downloads-7954f5f757-8l5sv container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.21:8080/\": dial tcp 10.217.0.21:8080: connect: connection refused" start-of-body= Nov 22 10:42:07 crc kubenswrapper[4926]: I1122 10:42:07.592582 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-8l5sv" podUID="6e0f47bd-848d-4619-a2f9-eb503d04e2e0" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.21:8080/\": dial tcp 10.217.0.21:8080: connect: connection refused" Nov 22 10:42:07 crc kubenswrapper[4926]: I1122 10:42:07.593245 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-admission-controller-857f4d67dd-js7td" event={"ID":"40a4057a-5a42-463f-aeb2-995754abca81","Type":"ContainerStarted","Data":"9a0d4da54121b332880e6ca0e4dd2c96710406283ea745c378ae9ab48e9980ff"} Nov 22 10:42:07 crc kubenswrapper[4926]: I1122 10:42:07.594440 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-qkq57" event={"ID":"1200b370-1ba8-4366-bb7f-bbf8e0c7ce1b","Type":"ContainerStarted","Data":"9cceb095434dfa9e27bfe4922427d324ac828d72a4ab387d01bf34afb8072f39"} Nov 22 10:42:07 crc kubenswrapper[4926]: I1122 10:42:07.598739 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-wlvg2" event={"ID":"e0ad330e-054d-4f9f-89e1-2e18fca1e66a","Type":"ContainerStarted","Data":"f16dab2babc1a552ed3bcd7d13a5757ee440a3c593fd70486a0d77f77387c540"} Nov 22 10:42:07 
crc kubenswrapper[4926]: I1122 10:42:07.600104 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-5mlfw" event={"ID":"9464da92-3307-4d40-8643-133ecf84d523","Type":"ContainerStarted","Data":"95fc1d02e1d876959cfc2e402db7c4b165c22287ff8531490a7fcd0ced378581"} Nov 22 10:42:07 crc kubenswrapper[4926]: W1122 10:42:07.600285 4926 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod873a85f5_ab2c_4594_8968_5104458c4233.slice/crio-cd9ac61c3c1deacbe190cfb4e184c73d71fa84751431eb8aed5703ae9ef27f5c WatchSource:0}: Error finding container cd9ac61c3c1deacbe190cfb4e184c73d71fa84751431eb8aed5703ae9ef27f5c: Status 404 returned error can't find the container with id cd9ac61c3c1deacbe190cfb4e184c73d71fa84751431eb8aed5703ae9ef27f5c Nov 22 10:42:07 crc kubenswrapper[4926]: I1122 10:42:07.605491 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd-operator/etcd-operator-b45778765-9ph96" event={"ID":"3e81d5b7-f92f-4de5-bb55-512620175698","Type":"ContainerStarted","Data":"16402ae0a8c5561c3058ada7162b332818cf33a86f4dc604f2624cde1818bf02"} Nov 22 10:42:07 crc kubenswrapper[4926]: I1122 10:42:07.606609 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca-operator/service-ca-operator-777779d784-dcbsr" event={"ID":"f9451a32-8743-493b-87a2-e7473354f0a4","Type":"ContainerStarted","Data":"aac561efd239d80b526a2609393ea800e032f6b06b2315d48cea835eea389995"} Nov 22 10:42:07 crc kubenswrapper[4926]: W1122 10:42:07.609662 4926 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod18cc7636_3262_4ea9_b0bc_23d29f6d2d2f.slice/crio-a7f217279c9ff09c77ec4de4bb793169ca892d989f92c0a6e080e2e7ac243ec6 WatchSource:0}: Error finding container a7f217279c9ff09c77ec4de4bb793169ca892d989f92c0a6e080e2e7ac243ec6: Status 404 returned error can't find the container with id a7f217279c9ff09c77ec4de4bb793169ca892d989f92c0a6e080e2e7ac243ec6 Nov 22 10:42:07 crc kubenswrapper[4926]: I1122 10:42:07.610455 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-rndxt" event={"ID":"80a51b2f-5a67-4cb4-ab41-09516cec7e4d","Type":"ContainerStarted","Data":"ef90e0a7f51971c34af4381a9ce7ce95f3989be4d21c449ee9edff81249d40d6"} Nov 22 10:42:07 crc kubenswrapper[4926]: I1122 10:42:07.610484 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-rndxt" event={"ID":"80a51b2f-5a67-4cb4-ab41-09516cec7e4d","Type":"ContainerStarted","Data":"9fd3ff8e7f9fb9480263758459ee2145ed7acefeeb926f28c2523749fcb6d42d"} Nov 22 10:42:07 crc kubenswrapper[4926]: I1122 10:42:07.611738 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-server-qhj5l" event={"ID":"286ef5c2-8cda-4b7e-868d-6fd126ef4845","Type":"ContainerStarted","Data":"44a3ea83a8370a5e5f4184786143c83101a80b65399e15a708a56fd0a81dcedf"} Nov 22 10:42:07 crc kubenswrapper[4926]: I1122 10:42:07.620309 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 22 10:42:07 crc 
kubenswrapper[4926]: E1122 10:42:07.620779 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 10:42:08.120764889 +0000 UTC m=+148.422370176 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 10:42:07 crc kubenswrapper[4926]: I1122 10:42:07.625746 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca/service-ca-9c57cc56f-2l9hr" event={"ID":"32923a07-7dfd-47a6-9b84-6cf7ebd329fa","Type":"ContainerStarted","Data":"2c261999c36511b36eb7f704f9991bfa583ac8dfac644160735251b37cf05812"} Nov 22 10:42:07 crc kubenswrapper[4926]: I1122 10:42:07.627079 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console-operator/console-operator-58897d9998-zk2x8" event={"ID":"2d43d07f-3f14-4be9-9801-d40bda91eb2e","Type":"ContainerStarted","Data":"b69af5097ba1d53d7f0e50717e867d153fe7a5b9fc534f36a5f7db8cdd6d24ea"} Nov 22 10:42:07 crc kubenswrapper[4926]: I1122 10:42:07.627545 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console-operator/console-operator-58897d9998-zk2x8" Nov 22 10:42:07 crc kubenswrapper[4926]: I1122 10:42:07.628795 4926 generic.go:334] "Generic (PLEG): container finished" podID="831547cf-3937-4d20-83b8-9570e309f0b3" containerID="1e48c9dfcaaa3fe7b15b931b343dd09aeb236aa72986d39f482604955146edc0" exitCode=0 Nov 22 10:42:07 crc kubenswrapper[4926]: I1122 10:42:07.628903 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-7zp2q" event={"ID":"831547cf-3937-4d20-83b8-9570e309f0b3","Type":"ContainerDied","Data":"1e48c9dfcaaa3fe7b15b931b343dd09aeb236aa72986d39f482604955146edc0"} Nov 22 10:42:07 crc kubenswrapper[4926]: I1122 10:42:07.630472 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/machine-api-operator-5694c8668f-jkqsx" event={"ID":"e0882887-a6d9-4aac-a7d7-c14b934298e2","Type":"ContainerStarted","Data":"c130183c238722b70099ab1bbdadb05c1da96ad8cc8a4476619a123a413d826f"} Nov 22 10:42:07 crc kubenswrapper[4926]: I1122 10:42:07.637298 4926 patch_prober.go:28] interesting pod/console-operator-58897d9998-zk2x8 container/console-operator namespace/openshift-console-operator: Readiness probe status=failure output="Get \"https://10.217.0.17:8443/readyz\": dial tcp 10.217.0.17:8443: connect: connection refused" start-of-body= Nov 22 10:42:07 crc kubenswrapper[4926]: I1122 10:42:07.637336 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console-operator/console-operator-58897d9998-zk2x8" podUID="2d43d07f-3f14-4be9-9801-d40bda91eb2e" containerName="console-operator" probeResult="failure" output="Get \"https://10.217.0.17:8443/readyz\": dial tcp 10.217.0.17:8443: connect: connection refused" Nov 22 10:42:07 crc kubenswrapper[4926]: I1122 10:42:07.645576 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-944rj" 
event={"ID":"dac98899-4c89-48b4-bf2d-0aee1eb95eff","Type":"ContainerStarted","Data":"58792fd4fac0888d6104bdc4686d2597a4cb6c1f2d6a2b600f9f43b9a0fe7bfb"} Nov 22 10:42:07 crc kubenswrapper[4926]: I1122 10:42:07.724700 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-wlnj9\" (UID: \"d7487242-27ca-4f15-8d7f-6a7cf67e8992\") " pod="openshift-image-registry/image-registry-697d97f7c8-wlnj9" Nov 22 10:42:07 crc kubenswrapper[4926]: E1122 10:42:07.725503 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-22 10:42:08.225491069 +0000 UTC m=+148.527096356 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-wlnj9" (UID: "d7487242-27ca-4f15-8d7f-6a7cf67e8992") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 10:42:07 crc kubenswrapper[4926]: I1122 10:42:07.740329 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-authentication-operator/authentication-operator-69f744f599-wvt6w" podStartSLOduration=128.740314003 podStartE2EDuration="2m8.740314003s" podCreationTimestamp="2025-11-22 10:39:59 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 10:42:07.739312135 +0000 UTC m=+148.040917422" watchObservedRunningTime="2025-11-22 10:42:07.740314003 +0000 UTC m=+148.041919290" Nov 22 10:42:07 crc kubenswrapper[4926]: I1122 10:42:07.826327 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 22 10:42:07 crc kubenswrapper[4926]: E1122 10:42:07.826719 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 10:42:08.326700467 +0000 UTC m=+148.628305754 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 10:42:07 crc kubenswrapper[4926]: I1122 10:42:07.862320 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/console-f9d7485db-nsj2w" podStartSLOduration=127.862299247 podStartE2EDuration="2m7.862299247s" podCreationTimestamp="2025-11-22 10:40:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 10:42:07.816000841 +0000 UTC m=+148.117606128" watchObservedRunningTime="2025-11-22 10:42:07.862299247 +0000 UTC m=+148.163904534" Nov 22 10:42:07 crc kubenswrapper[4926]: I1122 10:42:07.929072 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-wlnj9\" (UID: \"d7487242-27ca-4f15-8d7f-6a7cf67e8992\") " pod="openshift-image-registry/image-registry-697d97f7c8-wlnj9" Nov 22 10:42:07 crc kubenswrapper[4926]: E1122 10:42:07.929523 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-22 10:42:08.429507792 +0000 UTC m=+148.731113079 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-wlnj9" (UID: "d7487242-27ca-4f15-8d7f-6a7cf67e8992") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 10:42:07 crc kubenswrapper[4926]: I1122 10:42:07.940379 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-pdkqk" podStartSLOduration=127.940357193 podStartE2EDuration="2m7.940357193s" podCreationTimestamp="2025-11-22 10:40:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 10:42:07.937559023 +0000 UTC m=+148.239164320" watchObservedRunningTime="2025-11-22 10:42:07.940357193 +0000 UTC m=+148.241962480" Nov 22 10:42:07 crc kubenswrapper[4926]: I1122 10:42:07.954133 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-pdkqk" Nov 22 10:42:08 crc kubenswrapper[4926]: I1122 10:42:08.030590 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 22 10:42:08 crc kubenswrapper[4926]: E1122 10:42:08.031064 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 10:42:08.53104308 +0000 UTC m=+148.832648377 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 22 10:42:08 crc kubenswrapper[4926]: I1122 10:42:08.110501 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-service-ca/service-ca-9c57cc56f-2l9hr" podStartSLOduration=128.110482795 podStartE2EDuration="2m8.110482795s" podCreationTimestamp="2025-11-22 10:40:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 10:42:08.110233608 +0000 UTC m=+148.411838885" watchObservedRunningTime="2025-11-22 10:42:08.110482795 +0000 UTC m=+148.412088072"
Nov 22 10:42:08 crc kubenswrapper[4926]: I1122 10:42:08.133777 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-wlnj9\" (UID: \"d7487242-27ca-4f15-8d7f-6a7cf67e8992\") " pod="openshift-image-registry/image-registry-697d97f7c8-wlnj9"
Nov 22 10:42:08 crc kubenswrapper[4926]: E1122 10:42:08.134108 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-22 10:42:08.634098092 +0000 UTC m=+148.935703379 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-wlnj9" (UID: "d7487242-27ca-4f15-8d7f-6a7cf67e8992") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 22 10:42:08 crc kubenswrapper[4926]: I1122 10:42:08.143766 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-944rj" podStartSLOduration=128.143753308 podStartE2EDuration="2m8.143753308s" podCreationTimestamp="2025-11-22 10:40:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 10:42:08.142552334 +0000 UTC m=+148.444157621" watchObservedRunningTime="2025-11-22 10:42:08.143753308 +0000 UTC m=+148.445358595"
Nov 22 10:42:08 crc kubenswrapper[4926]: I1122 10:42:08.193811 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-dl4z4" podStartSLOduration=128.193794652 podStartE2EDuration="2m8.193794652s" podCreationTimestamp="2025-11-22 10:40:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 10:42:08.190626301 +0000 UTC m=+148.492231608" watchObservedRunningTime="2025-11-22 10:42:08.193794652 +0000 UTC m=+148.495399939"
Nov 22 10:42:08 crc kubenswrapper[4926]: I1122 10:42:08.247431 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 22 10:42:08 crc kubenswrapper[4926]: E1122 10:42:08.247722 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 10:42:08.747706906 +0000 UTC m=+149.049312193 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 22 10:42:08 crc kubenswrapper[4926]: I1122 10:42:08.268443 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/downloads-7954f5f757-8l5sv" podStartSLOduration=128.268420829 podStartE2EDuration="2m8.268420829s" podCreationTimestamp="2025-11-22 10:40:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 10:42:08.227379093 +0000 UTC m=+148.528984390" watchObservedRunningTime="2025-11-22 10:42:08.268420829 +0000 UTC m=+148.570026126"
Nov 22 10:42:08 crc kubenswrapper[4926]: I1122 10:42:08.279903 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-rndxt" podStartSLOduration=128.279864157 podStartE2EDuration="2m8.279864157s" podCreationTimestamp="2025-11-22 10:40:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 10:42:08.270656743 +0000 UTC m=+148.572262030" watchObservedRunningTime="2025-11-22 10:42:08.279864157 +0000 UTC m=+148.581469444"
Nov 22 10:42:08 crc kubenswrapper[4926]: I1122 10:42:08.297918 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-ztdwg" podStartSLOduration=129.297903453 podStartE2EDuration="2m9.297903453s" podCreationTimestamp="2025-11-22 10:39:59 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 10:42:08.296252516 +0000 UTC m=+148.597857803" watchObservedRunningTime="2025-11-22 10:42:08.297903453 +0000 UTC m=+148.599508740"
Nov 22 10:42:08 crc kubenswrapper[4926]: I1122 10:42:08.347762 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-brhnh" podStartSLOduration=128.347743501 podStartE2EDuration="2m8.347743501s" podCreationTimestamp="2025-11-22 10:40:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 10:42:08.347588557 +0000 UTC m=+148.649193844" watchObservedRunningTime="2025-11-22 10:42:08.347743501 +0000 UTC m=+148.649348788"
Nov 22 10:42:08 crc kubenswrapper[4926]: I1122 10:42:08.348411 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-wlnj9\" (UID: \"d7487242-27ca-4f15-8d7f-6a7cf67e8992\") " pod="openshift-image-registry/image-registry-697d97f7c8-wlnj9"
Nov 22 10:42:08 crc kubenswrapper[4926]: I1122 10:42:08.348465 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 22 10:42:08 crc kubenswrapper[4926]: I1122 10:42:08.348507 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 22 10:42:08 crc kubenswrapper[4926]: E1122 10:42:08.350004 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-22 10:42:08.849992175 +0000 UTC m=+149.151597462 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-wlnj9" (UID: "d7487242-27ca-4f15-8d7f-6a7cf67e8992") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 22 10:42:08 crc kubenswrapper[4926]: I1122 10:42:08.356530 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 22 10:42:08 crc kubenswrapper[4926]: I1122 10:42:08.369127 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 22 10:42:08 crc kubenswrapper[4926]: I1122 10:42:08.383148 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console-operator/console-operator-58897d9998-zk2x8" podStartSLOduration=128.383131995 podStartE2EDuration="2m8.383131995s" podCreationTimestamp="2025-11-22 10:40:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 10:42:08.375352392 +0000 UTC m=+148.676957679" watchObservedRunningTime="2025-11-22 10:42:08.383131995 +0000 UTC m=+148.684737282"
Nov 22 10:42:08 crc kubenswrapper[4926]: I1122 10:42:08.449642 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 22 10:42:08 crc kubenswrapper[4926]: I1122 10:42:08.449824 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 22 10:42:08 crc kubenswrapper[4926]: I1122 10:42:08.449865 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 22 10:42:08 crc kubenswrapper[4926]: E1122 10:42:08.450611 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 10:42:08.950574836 +0000 UTC m=+149.252180123 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 22 10:42:08 crc kubenswrapper[4926]: I1122 10:42:08.457431 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 22 10:42:08 crc kubenswrapper[4926]: I1122 10:42:08.462496 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 22 10:42:08 crc kubenswrapper[4926]: I1122 10:42:08.502199 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-879f6c89f-c56h5" podStartSLOduration=128.502179264 podStartE2EDuration="2m8.502179264s" podCreationTimestamp="2025-11-22 10:40:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 10:42:08.500327001 +0000 UTC m=+148.801932288" watchObservedRunningTime="2025-11-22 10:42:08.502179264 +0000 UTC m=+148.803784541"
Nov 22 10:42:08 crc kubenswrapper[4926]: I1122 10:42:08.550061 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-config-operator/openshift-config-operator-7777fb866f-954dx" podStartSLOduration=128.550044914 podStartE2EDuration="2m8.550044914s" podCreationTimestamp="2025-11-22 10:40:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 10:42:08.546999527 +0000 UTC m=+148.848604834" watchObservedRunningTime="2025-11-22 10:42:08.550044914 +0000 UTC m=+148.851650201"
Nov 22 10:42:08 crc kubenswrapper[4926]: I1122 10:42:08.552854 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-wlnj9\" (UID: \"d7487242-27ca-4f15-8d7f-6a7cf67e8992\") " pod="openshift-image-registry/image-registry-697d97f7c8-wlnj9"
Nov 22 10:42:08 crc kubenswrapper[4926]: E1122 10:42:08.553234 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-22 10:42:09.053221735 +0000 UTC m=+149.354827012 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-wlnj9" (UID: "d7487242-27ca-4f15-8d7f-6a7cf67e8992") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 22 10:42:08 crc kubenswrapper[4926]: I1122 10:42:08.597575 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-authentication/oauth-openshift-558db77b4-g6m6g" podStartSLOduration=129.597553515 podStartE2EDuration="2m9.597553515s" podCreationTimestamp="2025-11-22 10:39:59 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 10:42:08.587432085 +0000 UTC m=+148.889037372" watchObservedRunningTime="2025-11-22 10:42:08.597553515 +0000 UTC m=+148.899158802"
Nov 22 10:42:08 crc kubenswrapper[4926]: I1122 10:42:08.605917 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 22 10:42:08 crc kubenswrapper[4926]: I1122 10:42:08.621412 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 22 10:42:08 crc kubenswrapper[4926]: I1122 10:42:08.654292 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 22 10:42:08 crc kubenswrapper[4926]: E1122 10:42:08.654689 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 10:42:09.154674821 +0000 UTC m=+149.456280108 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 22 10:42:08 crc kubenswrapper[4926]: I1122 10:42:08.672361 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-wlvg2" event={"ID":"e0ad330e-054d-4f9f-89e1-2e18fca1e66a","Type":"ContainerStarted","Data":"1a497eb6cdbc8949b5922e9ef07bdd5a49a9684c8ab1edb2073284586c04c796"}
Nov 22 10:42:08 crc kubenswrapper[4926]: I1122 10:42:08.672402 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-wlvg2" event={"ID":"e0ad330e-054d-4f9f-89e1-2e18fca1e66a","Type":"ContainerStarted","Data":"defdcb5d1f929eab7c69fa89662ba68d68b24959ff54df77e64fcd0f6fcad696"}
Nov 22 10:42:08 crc kubenswrapper[4926]: I1122 10:42:08.698610 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-kkqbv" event={"ID":"46a7e15b-1a37-42ad-8e16-50681c5fccfc","Type":"ContainerStarted","Data":"d539440922294b02a3b5b6780612640ab938253d0fd21630745df2dcb581a640"}
Nov 22 10:42:08 crc kubenswrapper[4926]: I1122 10:42:08.703092 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 22 10:42:08 crc kubenswrapper[4926]: I1122 10:42:08.704635 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-wlvg2" podStartSLOduration=128.704614191 podStartE2EDuration="2m8.704614191s" podCreationTimestamp="2025-11-22 10:40:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 10:42:08.701194073 +0000 UTC m=+149.002799360" watchObservedRunningTime="2025-11-22 10:42:08.704614191 +0000 UTC m=+149.006219478"
Nov 22 10:42:08 crc kubenswrapper[4926]: I1122 10:42:08.716864 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-kkqbv" podStartSLOduration=128.716842952 podStartE2EDuration="2m8.716842952s" podCreationTimestamp="2025-11-22 10:40:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 10:42:08.71505162 +0000 UTC m=+149.016656907" watchObservedRunningTime="2025-11-22 10:42:08.716842952 +0000 UTC m=+149.018448239"
Nov 22 10:42:08 crc kubenswrapper[4926]: I1122 10:42:08.729127 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-zmfpm" event={"ID":"55c9f174-5a49-4d37-9ce6-8640b252c9c2","Type":"ContainerStarted","Data":"41cbd5e6980d0b01c16bcf538f7a69dbcf75d89cea02d94594b1de90274e3d0d"}
Nov 22 10:42:08 crc kubenswrapper[4926]: I1122 10:42:08.730371 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-zmfpm"
Nov 22 10:42:08 crc kubenswrapper[4926]: I1122 10:42:08.736230 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-server-qhj5l" event={"ID":"286ef5c2-8cda-4b7e-868d-6fd126ef4845","Type":"ContainerStarted","Data":"afab405f4ef971a913359b72de6eef87ca2759d642f14cf09fe2dd8a350c3a2c"}
Nov 22 10:42:08 crc kubenswrapper[4926]: I1122 10:42:08.745768 4926 patch_prober.go:28] interesting pod/catalog-operator-68c6474976-zmfpm container/catalog-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.40:8443/healthz\": dial tcp 10.217.0.40:8443: connect: connection refused" start-of-body=
Nov 22 10:42:08 crc kubenswrapper[4926]: I1122 10:42:08.745824 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-zmfpm" podUID="55c9f174-5a49-4d37-9ce6-8640b252c9c2" containerName="catalog-operator" probeResult="failure" output="Get \"https://10.217.0.40:8443/healthz\": dial tcp 10.217.0.40:8443: connect: connection refused"
Nov 22 10:42:08 crc kubenswrapper[4926]: I1122 10:42:08.746455 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-t4dgw" event={"ID":"cb40abc0-74ba-42e9-bc0d-a4cdfc998421","Type":"ContainerStarted","Data":"13f3dce6b9e309f6233bf4f855d677072c92f402a04d79c9f732dbb1f03a7587"}
Nov 22 10:42:08 crc kubenswrapper[4926]: I1122 10:42:08.746490 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-t4dgw" event={"ID":"cb40abc0-74ba-42e9-bc0d-a4cdfc998421","Type":"ContainerStarted","Data":"16b90f74fa215399b428bca91d5d0cceb649d87defcd89bcfa567ec42a626c02"}
Nov 22 10:42:08 crc kubenswrapper[4926]: I1122 10:42:08.749956 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-zmfpm" podStartSLOduration=128.74994496 podStartE2EDuration="2m8.74994496s" podCreationTimestamp="2025-11-22 10:40:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 10:42:08.747915291 +0000 UTC m=+149.049520578" watchObservedRunningTime="2025-11-22 10:42:08.74994496 +0000 UTC m=+149.051550247"
Nov 22 10:42:08 crc kubenswrapper[4926]: I1122 10:42:08.753274 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-admission-controller-857f4d67dd-js7td" event={"ID":"40a4057a-5a42-463f-aeb2-995754abca81","Type":"ContainerStarted","Data":"4268fb2698b7dacd9e7e164a5fad508dfe948ef0b8311f1d209f3646506bbd78"}
Nov 22 10:42:08 crc kubenswrapper[4926]: I1122 10:42:08.755433 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-wlnj9\" (UID: \"d7487242-27ca-4f15-8d7f-6a7cf67e8992\") " pod="openshift-image-registry/image-registry-697d97f7c8-wlnj9"
Nov 22 10:42:08 crc kubenswrapper[4926]: E1122 10:42:08.756308 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-22 10:42:09.256297952 +0000 UTC m=+149.557903239 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-wlnj9" (UID: "d7487242-27ca-4f15-8d7f-6a7cf67e8992") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 22 10:42:08 crc kubenswrapper[4926]: I1122 10:42:08.762252 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/machine-api-operator-5694c8668f-jkqsx" event={"ID":"e0882887-a6d9-4aac-a7d7-c14b934298e2","Type":"ContainerStarted","Data":"f01a732b4879c9199baca01d6fa23b42e82159f44339616cd3387554d8e3f4dc"}
Nov 22 10:42:08 crc kubenswrapper[4926]: I1122 10:42:08.764297 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns-operator/dns-operator-744455d44c-jb7x7" event={"ID":"731bcd4c-12b4-408c-a30b-dd7ccd6a0712","Type":"ContainerStarted","Data":"63fd66595863c88d41ffe622a1a6d076a51a937707f6d3e2be183f3bf35d5126"}
Nov 22 10:42:08 crc kubenswrapper[4926]: I1122 10:42:08.765933 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-qkq57" event={"ID":"1200b370-1ba8-4366-bb7f-bbf8e0c7ce1b","Type":"ContainerStarted","Data":"c078a0f1b4c40a55108862779db674070a58d7f4f5c331a7fb820f63ee89e382"}
Nov 22 10:42:08 crc kubenswrapper[4926]: I1122 10:42:08.766677 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-mwqpv" event={"ID":"873a85f5-ab2c-4594-8968-5104458c4233","Type":"ContainerStarted","Data":"cd9ac61c3c1deacbe190cfb4e184c73d71fa84751431eb8aed5703ae9ef27f5c"}
Nov 22 10:42:08 crc kubenswrapper[4926]: I1122 10:42:08.769617 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-cm8rb" event={"ID":"4943246c-40df-4927-8380-b7d2804a17f7","Type":"ContainerStarted","Data":"fc207104e5a09fb5729483274c5d7f4defae319a8d902df57714a5315bb14397"}
Nov 22 10:42:08 crc kubenswrapper[4926]: I1122 10:42:08.770375 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/marketplace-operator-79b997595-cm8rb"
Nov 22 10:42:08 crc kubenswrapper[4926]: I1122 10:42:08.771473 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-rwrgv" event={"ID":"3711dcd6-7ec3-4610-857d-e24f38c6e986","Type":"ContainerStarted","Data":"e8d510f181d8624ea4c5ac2be842a54a85915c19f6dfc5db7be5a4c1d4dae1f2"}
Nov 22 10:42:08 crc kubenswrapper[4926]: I1122 10:42:08.775510 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-rwrgv"
Nov 22 10:42:08 crc kubenswrapper[4926]: I1122 10:42:08.788344 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-j5lpl" event={"ID":"67768e9f-2f78-4189-ace5-1bedba6669a7","Type":"ContainerStarted","Data":"a2f3863eb3eea5b440a13b71b4478e5b2dfa943e12f0e0d00a43ad57d0d8ed88"}
Nov 22 10:42:08 crc kubenswrapper[4926]: I1122 10:42:08.788503 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-server-qhj5l" podStartSLOduration=5.788489144 podStartE2EDuration="5.788489144s" podCreationTimestamp="2025-11-22 10:42:03 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 10:42:08.787784393 +0000 UTC m=+149.089389680" watchObservedRunningTime="2025-11-22 10:42:08.788489144 +0000 UTC m=+149.090094441"
Nov 22 10:42:08 crc kubenswrapper[4926]: I1122 10:42:08.805148 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress/router-default-5444994796-8k4sd" event={"ID":"12a1d8f7-8b05-42ae-bba2-13bb443eae1e","Type":"ContainerStarted","Data":"fb7d1aa2108dc22c1f83aedefeea22d4be0b214d8ee694aedc6676133c0a63e4"}
Nov 22 10:42:08 crc kubenswrapper[4926]: I1122 10:42:08.807169 4926 patch_prober.go:28] interesting pod/packageserver-d55dfcdfc-rwrgv container/packageserver namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.26:5443/healthz\": dial tcp 10.217.0.26:5443: connect: connection refused" start-of-body=
Nov 22 10:42:08 crc kubenswrapper[4926]: I1122 10:42:08.807215 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-rwrgv" podUID="3711dcd6-7ec3-4610-857d-e24f38c6e986" containerName="packageserver" probeResult="failure" output="Get \"https://10.217.0.26:5443/healthz\": dial tcp 10.217.0.26:5443: connect: connection refused"
Nov 22 10:42:08 crc kubenswrapper[4926]: I1122 10:42:08.807297 4926 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-cm8rb container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.24:8080/healthz\": dial tcp 10.217.0.24:8080: connect: connection refused" start-of-body=
Nov 22 10:42:08 crc kubenswrapper[4926]: I1122 10:42:08.807313 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-cm8rb" podUID="4943246c-40df-4927-8380-b7d2804a17f7" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.24:8080/healthz\": dial tcp 10.217.0.24:8080: connect: connection refused"
Nov 22 10:42:08 crc kubenswrapper[4926]: I1122 10:42:08.837718 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-pzqp7" event={"ID":"05be3235-93e8-4d5c-8f34-9c47f694bb1b","Type":"ContainerStarted","Data":"eccc04c856cba723efcb3557f6f9e37b309ec42aaf39fc290714f631eaeb5778"}
Nov 22 10:42:08 crc kubenswrapper[4926]: I1122 10:42:08.846149 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-h55df" event={"ID":"b95520b5-bae8-4409-9ae4-ad3763092f1c","Type":"ContainerStarted","Data":"37f8c8fa73937ea44698ef86ccd8caf45d552ea9f18d593ded2f18df923f9269"}
Nov 22 10:42:08 crc kubenswrapper[4926]: I1122 10:42:08.851427 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca-operator/service-ca-operator-777779d784-dcbsr" event={"ID":"f9451a32-8743-493b-87a2-e7473354f0a4","Type":"ContainerStarted","Data":"caebfc8046c4b4a6547ed52a054922943cbff3436665924e9208fd27ae12adec"}
Nov 22 10:42:08 crc kubenswrapper[4926]: I1122 10:42:08.851903 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/marketplace-operator-79b997595-cm8rb" podStartSLOduration=128.851870599 podStartE2EDuration="2m8.851870599s" podCreationTimestamp="2025-11-22 10:40:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 10:42:08.846491655 +0000 UTC m=+149.148096962" watchObservedRunningTime="2025-11-22 10:42:08.851870599 +0000 UTC m=+149.153475886"
Nov 22 10:42:08 crc kubenswrapper[4926]: I1122 10:42:08.852198 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-rwrgv" podStartSLOduration=128.852192608 podStartE2EDuration="2m8.852192608s" podCreationTimestamp="2025-11-22 10:40:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 10:42:08.818346859 +0000 UTC m=+149.119952146" watchObservedRunningTime="2025-11-22 10:42:08.852192608 +0000 UTC m=+149.153797885"
Nov 22 10:42:08 crc kubenswrapper[4926]: I1122 10:42:08.854247 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd-operator/etcd-operator-b45778765-9ph96" event={"ID":"3e81d5b7-f92f-4de5-bb55-512620175698","Type":"ContainerStarted","Data":"e892c01569b03c63121a0e3d0ffbdf9952bc431a8ecb162ba34def7ab1eb32ac"}
Nov 22 10:42:08 crc kubenswrapper[4926]: I1122 10:42:08.855770 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 22 10:42:08 crc kubenswrapper[4926]: I1122 10:42:08.856683 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29396790-vw7kn" event={"ID":"f6332434-11ab-46ab-8379-c056d2c292b5","Type":"ContainerStarted","Data":"3bc4c73d77c40218b9b6c248cd942048e0a507829d42cc11887f49232b4f78e8"}
Nov 22 10:42:08 crc kubenswrapper[4926]: I1122 10:42:08.856705 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29396790-vw7kn" event={"ID":"f6332434-11ab-46ab-8379-c056d2c292b5","Type":"ContainerStarted","Data":"9c8d58efb6dcccfd45042b4a981bcbad9c0ed86f357853987637f580841bc003"}
Nov 22 10:42:08 crc kubenswrapper[4926]: E1122 10:42:08.857021 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 10:42:09.357004186 +0000 UTC m=+149.658609473 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 22 10:42:08 crc kubenswrapper[4926]: I1122 10:42:08.874136 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-27btn" event={"ID":"63ad9007-0aba-4183-95d0-d97f7034841d","Type":"ContainerStarted","Data":"07346547ed583e960f0c497e412df6a71b4161aab11a7d8f330eed6c9fb00bc4"}
Nov 22 10:42:08 crc kubenswrapper[4926]: I1122 10:42:08.878693 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-pzqp7" podStartSLOduration=128.878676177 podStartE2EDuration="2m8.878676177s" podCreationTimestamp="2025-11-22 10:40:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 10:42:08.876205056 +0000 UTC m=+149.177810343" watchObservedRunningTime="2025-11-22 10:42:08.878676177 +0000 UTC m=+149.180281464"
Nov 22 10:42:08 crc kubenswrapper[4926]: I1122 10:42:08.901041 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ingress/router-default-5444994796-8k4sd" podStartSLOduration=128.901020127 podStartE2EDuration="2m8.901020127s" podCreationTimestamp="2025-11-22 10:40:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 10:42:08.89939715 +0000 UTC m=+149.201002437" watchObservedRunningTime="2025-11-22 10:42:08.901020127 +0000 UTC m=+149.202625414"
Nov 22 10:42:08 crc kubenswrapper[4926]: I1122 10:42:08.908865 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-pcmsx" event={"ID":"171f6f42-9983-401e-9aa1-1f1a7dfd412c","Type":"ContainerStarted","Data":"1665c2088a9559d97a2745c8c0a660090ab3677264a43450fae0bcc57812502e"}
Nov 22 10:42:08 crc kubenswrapper[4926]: I1122 10:42:08.908926 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-pcmsx" event={"ID":"171f6f42-9983-401e-9aa1-1f1a7dfd412c","Type":"ContainerStarted","Data":"58ab4e41bc86bfbcdfe44a53479d1818046b613aa92b720fb307acc615f8c495"}
Nov 22 10:42:08 crc kubenswrapper[4926]: I1122 10:42:08.924697 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-5mlfw" event={"ID":"9464da92-3307-4d40-8643-133ecf84d523","Type":"ContainerStarted","Data":"480b5327086917c638dbd8db31aeb1325b6ac0e9e8a16d143217b95346f747c0"}
Nov 22 10:42:08 crc kubenswrapper[4926]: I1122 10:42:08.928378 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-p5j7r" event={"ID":"5a5a2c07-33d5-4376-bee5-375341146a78","Type":"ContainerStarted","Data":"86a7114fd7d11e41438723859aa47881ef1dc9923beee15865dab5af15ccb023"}
Nov 22 10:42:08 crc kubenswrapper[4926]: I1122 10:42:08.929487 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-canary/ingress-canary-k24qp" event={"ID":"18cc7636-3262-4ea9-b0bc-23d29f6d2d2f","Type":"ContainerStarted","Data":"2ea114f43ac81bdf772b1897b30df9d8beee13e3079e10de57ac58dfdb4c2730"}
Nov 22 10:42:08 crc kubenswrapper[4926]: I1122 10:42:08.929504 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-canary/ingress-canary-k24qp" event={"ID":"18cc7636-3262-4ea9-b0bc-23d29f6d2d2f","Type":"ContainerStarted","Data":"a7f217279c9ff09c77ec4de4bb793169ca892d989f92c0a6e080e2e7ac243ec6"}
Nov 22 10:42:08 crc kubenswrapper[4926]: I1122 10:42:08.942291 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-j5lpl" podStartSLOduration=128.942274898 podStartE2EDuration="2m8.942274898s" podCreationTimestamp="2025-11-22 10:40:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 10:42:08.935650308 +0000 UTC m=+149.237255595" watchObservedRunningTime="2025-11-22 10:42:08.942274898 +0000 UTC m=+149.243880175"
Nov 22 10:42:08 crc kubenswrapper[4926]: I1122 10:42:08.960647 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-wlnj9\" (UID: \"d7487242-27ca-4f15-8d7f-6a7cf67e8992\") " pod="openshift-image-registry/image-registry-697d97f7c8-wlnj9"
Nov 22 10:42:08 crc kubenswrapper[4926]: I1122 10:42:08.962844 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-rkbd7" event={"ID":"0fd98b4c-0217-4784-8bbd-b0ec0680a611","Type":"ContainerStarted","Data":"06bb7b426b8d48f6348d7517038b7db67ad32d1e5982289b72ffc5d8d5daed90"}
Nov 22 10:42:08 crc kubenswrapper[4926]: I1122 10:42:08.963966 4926 patch_prober.go:28] interesting pod/olm-operator-6b444d44fb-brhnh container/olm-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.22:8443/healthz\": dial tcp 10.217.0.22:8443: connect: connection refused" start-of-body=
Nov 22 10:42:08 crc kubenswrapper[4926]: I1122 10:42:08.963993 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-brhnh" podUID="261fec63-db4b-4580-a128-3cc51da2cc93" containerName="olm-operator" probeResult="failure" output="Get \"https://10.217.0.22:8443/healthz\": dial tcp 10.217.0.22:8443: connect: connection refused"
Nov 22 10:42:08 crc kubenswrapper[4926]: E1122 10:42:08.965766 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-22 10:42:09.465751451 +0000 UTC m=+149.767356738 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-wlnj9" (UID: "d7487242-27ca-4f15-8d7f-6a7cf67e8992") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 22 10:42:08 crc kubenswrapper[4926]: I1122 10:42:08.966104 4926 patch_prober.go:28] interesting pod/controller-manager-879f6c89f-c56h5 container/controller-manager namespace/openshift-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.12:8443/healthz\": dial tcp 10.217.0.12:8443: connect: connection refused" start-of-body=
Nov 22 10:42:08 crc kubenswrapper[4926]: I1122 10:42:08.966142 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-controller-manager/controller-manager-879f6c89f-c56h5" podUID="fbac19dc-113c-44e6-8744-445e62ea540d" containerName="controller-manager" probeResult="failure" output="Get \"https://10.217.0.12:8443/healthz\": dial tcp 10.217.0.12:8443: connect: connection refused"
Nov 22 10:42:08 crc kubenswrapper[4926]: I1122 10:42:08.967064 4926 patch_prober.go:28] interesting pod/oauth-openshift-558db77b4-g6m6g container/oauth-openshift namespace/openshift-authentication: Readiness probe status=failure output="Get \"https://10.217.0.10:6443/healthz\": dial tcp 10.217.0.10:6443: connect: connection refused" start-of-body=
Nov 22 10:42:08 crc kubenswrapper[4926]: I1122 10:42:08.967267 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-authentication/oauth-openshift-558db77b4-g6m6g" podUID="62b979b0-adbc-4c29-a08d-55bb2d07fc6c" containerName="oauth-openshift" probeResult="failure" output="Get \"https://10.217.0.10:6443/healthz\": dial tcp 10.217.0.10:6443: connect: connection refused"
Nov 22 10:42:08 crc kubenswrapper[4926]: I1122 10:42:08.967326 4926 patch_prober.go:28] interesting pod/downloads-7954f5f757-8l5sv container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.21:8080/\": dial tcp 10.217.0.21:8080: connect: connection refused" start-of-body=
Nov 22 10:42:08 crc kubenswrapper[4926]: I1122 10:42:08.967357 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-8l5sv" podUID="6e0f47bd-848d-4619-a2f9-eb503d04e2e0" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.21:8080/\": dial tcp 10.217.0.21:8080: connect: connection refused"
Nov 22 10:42:08 crc kubenswrapper[4926]: I1122 10:42:08.968284 4926 patch_prober.go:28] interesting pod/console-operator-58897d9998-zk2x8 container/console-operator namespace/openshift-console-operator: Readiness probe status=failure output="Get \"https://10.217.0.17:8443/readyz\": dial tcp 10.217.0.17:8443: connect: connection refused" start-of-body=
Nov 22 10:42:08 crc kubenswrapper[4926]: I1122 10:42:08.968304 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console-operator/console-operator-58897d9998-zk2x8" podUID="2d43d07f-3f14-4be9-9801-d40bda91eb2e" containerName="console-operator" probeResult="failure" output="Get \"https://10.217.0.17:8443/readyz\": dial tcp 10.217.0.17:8443: connect: connection refused"
Nov 22 10:42:08 crc kubenswrapper[4926]: I1122 10:42:08.981942 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ingress-canary/ingress-canary-k24qp" podStartSLOduration=5.981837351 podStartE2EDuration="5.981837351s" podCreationTimestamp="2025-11-22 10:42:03 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 10:42:08.981703477 +0000 UTC m=+149.283308774" watchObservedRunningTime="2025-11-22 10:42:08.981837351 +0000 UTC m=+149.283442638"
Nov 22 10:42:09 crc kubenswrapper[4926]: I1122 10:42:09.057308 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-27btn" podStartSLOduration=129.057288672 podStartE2EDuration="2m9.057288672s" podCreationTimestamp="2025-11-22 10:40:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 10:42:09.056384986 +0000 UTC m=+149.357990273" watchObservedRunningTime="2025-11-22 10:42:09.057288672 +0000 UTC m=+149.358893959"
Nov 22 10:42:09 crc kubenswrapper[4926]: I1122 10:42:09.061895 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 22 10:42:09 crc kubenswrapper[4926]: E1122 10:42:09.063051 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 10:42:09.563030877 +0000 UTC m=+149.864636164 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 22 10:42:09 crc kubenswrapper[4926]: I1122 10:42:09.063067 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-pcmsx" podStartSLOduration=129.063046117 podStartE2EDuration="2m9.063046117s" podCreationTimestamp="2025-11-22 10:40:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 10:42:09.015269179 +0000 UTC m=+149.316874466" watchObservedRunningTime="2025-11-22 10:42:09.063046117 +0000 UTC m=+149.364651394"
Nov 22 10:42:09 crc kubenswrapper[4926]: I1122 10:42:09.139319 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-etcd-operator/etcd-operator-b45778765-9ph96" podStartSLOduration=129.139299021 podStartE2EDuration="2m9.139299021s" podCreationTimestamp="2025-11-22 10:40:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 10:42:09.112929256 +0000 UTC m=+149.414534553" watchObservedRunningTime="2025-11-22 10:42:09.139299021 +0000 UTC m=+149.440904308"
Nov 22 10:42:09 crc kubenswrapper[4926]: I1122 10:42:09.164402 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-wlnj9\" (UID: \"d7487242-27ca-4f15-8d7f-6a7cf67e8992\") " pod="openshift-image-registry/image-registry-697d97f7c8-wlnj9"
Nov 22 10:42:09 crc kubenswrapper[4926]: E1122 10:42:09.164722 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-22 10:42:09.664711869 +0000 UTC m=+149.966317156 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-wlnj9" (UID: "d7487242-27ca-4f15-8d7f-6a7cf67e8992") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 22 10:42:09 crc kubenswrapper[4926]: I1122 10:42:09.190776 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-service-ca-operator/service-ca-operator-777779d784-dcbsr" podStartSLOduration=129.190755255 podStartE2EDuration="2m9.190755255s" podCreationTimestamp="2025-11-22 10:40:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 10:42:09.190132087 +0000 UTC m=+149.491737374" watchObservedRunningTime="2025-11-22 10:42:09.190755255 +0000 UTC m=+149.492360542"
Nov 22 10:42:09 crc kubenswrapper[4926]: I1122 10:42:09.192057 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-5mlfw" podStartSLOduration=129.192049742 podStartE2EDuration="2m9.192049742s" podCreationTimestamp="2025-11-22 10:40:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 10:42:09.137404097 +0000 UTC m=+149.439009384" watchObservedRunningTime="2025-11-22 10:42:09.192049742 +0000 UTC m=+149.493655029"
Nov 22 10:42:09 crc kubenswrapper[4926]: I1122 10:42:09.268778 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29396790-vw7kn" podStartSLOduration=129.268754919 podStartE2EDuration="2m9.268754919s" podCreationTimestamp="2025-11-22 10:40:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 10:42:09.222050441 +0000 UTC m=+149.523655728" watchObservedRunningTime="2025-11-22 10:42:09.268754919 +0000 UTC m=+149.570360206"
Nov 22 10:42:09 crc kubenswrapper[4926]: I1122 10:42:09.272145 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 22 10:42:09 crc kubenswrapper[4926]: E1122 10:42:09.272452 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 10:42:09.772433614 +0000 UTC m=+150.074038901 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 22 10:42:09 crc kubenswrapper[4926]: I1122 10:42:09.298689 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-rkbd7" podStartSLOduration=129.298673376 podStartE2EDuration="2m9.298673376s" podCreationTimestamp="2025-11-22 10:40:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 10:42:09.27122248 +0000 UTC m=+149.572827767" watchObservedRunningTime="2025-11-22 10:42:09.298673376 +0000 UTC m=+149.600278663"
Nov 22 10:42:09 crc kubenswrapper[4926]: I1122 10:42:09.374252 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-wlnj9\" (UID: \"d7487242-27ca-4f15-8d7f-6a7cf67e8992\") " pod="openshift-image-registry/image-registry-697d97f7c8-wlnj9"
Nov 22 10:42:09 crc kubenswrapper[4926]: E1122 10:42:09.374544 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-22 10:42:09.874532299 +0000 UTC m=+150.176137586 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-wlnj9" (UID: "d7487242-27ca-4f15-8d7f-6a7cf67e8992") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 22 10:42:09 crc kubenswrapper[4926]: I1122 10:42:09.473268 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-ingress/router-default-5444994796-8k4sd"
Nov 22 10:42:09 crc kubenswrapper[4926]: I1122 10:42:09.474779 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 22 10:42:09 crc kubenswrapper[4926]: E1122 10:42:09.475200 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 10:42:09.975182651 +0000 UTC m=+150.276787938 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 22 10:42:09 crc kubenswrapper[4926]: I1122 10:42:09.481357 4926 patch_prober.go:28] interesting pod/router-default-5444994796-8k4sd container/router namespace/openshift-ingress: Startup probe status=failure output="Get \"http://localhost:1936/healthz/ready\": dial tcp [::1]:1936: connect: connection refused" start-of-body=
Nov 22 10:42:09 crc kubenswrapper[4926]: I1122 10:42:09.481404 4926 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-8k4sd" podUID="12a1d8f7-8b05-42ae-bba2-13bb443eae1e" containerName="router" probeResult="failure" output="Get \"http://localhost:1936/healthz/ready\": dial tcp [::1]:1936: connect: connection refused"
Nov 22 10:42:09 crc kubenswrapper[4926]: W1122 10:42:09.499833 4926 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3b6479f0_333b_4a96_9adf_2099afdc2447.slice/crio-e9806054c34fdcc72cc450fa2e53b5dffd8990f041f7e820e5f056d679cd0d05 WatchSource:0}: Error finding container e9806054c34fdcc72cc450fa2e53b5dffd8990f041f7e820e5f056d679cd0d05: Status 404 returned error can't find the container with id e9806054c34fdcc72cc450fa2e53b5dffd8990f041f7e820e5f056d679cd0d05
Nov 22 10:42:09 crc kubenswrapper[4926]: I1122 10:42:09.576172 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-wlnj9\" (UID: \"d7487242-27ca-4f15-8d7f-6a7cf67e8992\") " pod="openshift-image-registry/image-registry-697d97f7c8-wlnj9"
Nov 22 10:42:09 crc kubenswrapper[4926]: E1122 10:42:09.576595 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-22 10:42:10.076580826 +0000 UTC m=+150.378186113 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-wlnj9" (UID: "d7487242-27ca-4f15-8d7f-6a7cf67e8992") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 22 10:42:09 crc kubenswrapper[4926]: I1122 10:42:09.662061 4926 patch_prober.go:28] interesting pod/machine-config-daemon-xr9nd container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 22 10:42:09 crc kubenswrapper[4926]: I1122 10:42:09.662122 4926 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" podUID="d4977b14-85c3-4141-9b15-1768f09e8d27" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 22 10:42:09 crc kubenswrapper[4926]: I1122 10:42:09.679458 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 22 10:42:09 crc kubenswrapper[4926]: E1122 10:42:09.679760 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 10:42:10.17974637 +0000 UTC m=+150.481351647 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 22 10:42:09 crc kubenswrapper[4926]: I1122 10:42:09.780642 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-wlnj9\" (UID: \"d7487242-27ca-4f15-8d7f-6a7cf67e8992\") " pod="openshift-image-registry/image-registry-697d97f7c8-wlnj9"
Nov 22 10:42:09 crc kubenswrapper[4926]: E1122 10:42:09.781083 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-22 10:42:10.281061542 +0000 UTC m=+150.582666849 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-wlnj9" (UID: "d7487242-27ca-4f15-8d7f-6a7cf67e8992") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 22 10:42:09 crc kubenswrapper[4926]: I1122 10:42:09.882005 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 22 10:42:09 crc kubenswrapper[4926]: E1122 10:42:09.882161 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 10:42:10.382135457 +0000 UTC m=+150.683740744 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 22 10:42:09 crc kubenswrapper[4926]: I1122 10:42:09.882408 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-wlnj9\" (UID: \"d7487242-27ca-4f15-8d7f-6a7cf67e8992\") " pod="openshift-image-registry/image-registry-697d97f7c8-wlnj9"
Nov 22 10:42:09 crc kubenswrapper[4926]: E1122 10:42:09.882732 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-22 10:42:10.382722624 +0000 UTC m=+150.684327901 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-wlnj9" (UID: "d7487242-27ca-4f15-8d7f-6a7cf67e8992") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 22 10:42:09 crc kubenswrapper[4926]: I1122 10:42:09.977497 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" event={"ID":"3b6479f0-333b-4a96-9adf-2099afdc2447","Type":"ContainerStarted","Data":"96ceecb38797a9a8fbd507921c2d826d272a9fc36b21b80e44189f81aae0a52f"}
Nov 22 10:42:09 crc kubenswrapper[4926]: I1122 10:42:09.977565 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" event={"ID":"3b6479f0-333b-4a96-9adf-2099afdc2447","Type":"ContainerStarted","Data":"e9806054c34fdcc72cc450fa2e53b5dffd8990f041f7e820e5f056d679cd0d05"}
Nov 22 10:42:09 crc kubenswrapper[4926]: I1122 10:42:09.978061 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 22 10:42:09 crc kubenswrapper[4926]: I1122 10:42:09.982276 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-7zp2q" event={"ID":"831547cf-3937-4d20-83b8-9570e309f0b3","Type":"ContainerStarted","Data":"419edad4794105d1c0e103990eb111a9f941c76c42d5696f7c92371dd369b72a"}
Nov 22 10:42:09 crc kubenswrapper[4926]: I1122 10:42:09.983141 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 22 10:42:09 crc kubenswrapper[4926]: E1122 10:42:09.983297 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 10:42:10.483273604 +0000 UTC m=+150.784878891 (durationBeforeRetry 500ms).
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 10:42:09 crc kubenswrapper[4926]: I1122 10:42:09.983432 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-wlnj9\" (UID: \"d7487242-27ca-4f15-8d7f-6a7cf67e8992\") " pod="openshift-image-registry/image-registry-697d97f7c8-wlnj9" Nov 22 10:42:09 crc kubenswrapper[4926]: E1122 10:42:09.983791 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-22 10:42:10.483776918 +0000 UTC m=+150.785382205 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-wlnj9" (UID: "d7487242-27ca-4f15-8d7f-6a7cf67e8992") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 10:42:09 crc kubenswrapper[4926]: I1122 10:42:09.990121 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-qkq57" event={"ID":"1200b370-1ba8-4366-bb7f-bbf8e0c7ce1b","Type":"ContainerStarted","Data":"17c46da2c696088c3cba137313ddecabc2807793e2b6ac01fd4c4b6f4300b888"} Nov 22 10:42:09 crc kubenswrapper[4926]: I1122 10:42:09.993627 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" event={"ID":"9d751cbb-f2e2-430d-9754-c882a5e924a5","Type":"ContainerStarted","Data":"7e32c7cd1d3ecfb40063f4d7fdc372b371d41ab6f7e26e0e01f06d8a67c3ae49"} Nov 22 10:42:09 crc kubenswrapper[4926]: I1122 10:42:09.993674 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" event={"ID":"9d751cbb-f2e2-430d-9754-c882a5e924a5","Type":"ContainerStarted","Data":"a81aecad793b9f219fbbae7684fc7ed001a4d553c842e33015257298527b6b4d"} Nov 22 10:42:09 crc kubenswrapper[4926]: I1122 10:42:09.995417 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-mwqpv" event={"ID":"873a85f5-ab2c-4594-8968-5104458c4233","Type":"ContainerStarted","Data":"af51e18a846734fff115de92a626f475e4fe972235a3eff4160fc885aa7debdf"} Nov 22 10:42:09 crc kubenswrapper[4926]: I1122 10:42:09.995457 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-mwqpv" event={"ID":"873a85f5-ab2c-4594-8968-5104458c4233","Type":"ContainerStarted","Data":"a38858be592294b0458e4d17292482b21081e32ba029fa7e44e12c4e59664448"} Nov 22 10:42:09 crc kubenswrapper[4926]: I1122 10:42:09.995711 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-dns/dns-default-mwqpv" Nov 22 10:42:10 crc 
kubenswrapper[4926]: I1122 10:42:10.002263 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-t4dgw" event={"ID":"cb40abc0-74ba-42e9-bc0d-a4cdfc998421","Type":"ContainerStarted","Data":"69bc7271caf874ab50d18fa9d59387b41b062b33017d8f26d6cdbf21cd065f38"} Nov 22 10:42:10 crc kubenswrapper[4926]: I1122 10:42:10.006190 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-admission-controller-857f4d67dd-js7td" event={"ID":"40a4057a-5a42-463f-aeb2-995754abca81","Type":"ContainerStarted","Data":"d889de2ce78c934de1896eef4c06f9c13d7fe9153448cab7b9db794fc3c64de3"} Nov 22 10:42:10 crc kubenswrapper[4926]: I1122 10:42:10.025463 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/machine-api-operator-5694c8668f-jkqsx" event={"ID":"e0882887-a6d9-4aac-a7d7-c14b934298e2","Type":"ContainerStarted","Data":"9c77717bf8bc99a926edeaaf0f9f0de55909d679abe78de7286b102d64fc4ad2"} Nov 22 10:42:10 crc kubenswrapper[4926]: I1122 10:42:10.031410 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" event={"ID":"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8","Type":"ContainerStarted","Data":"3df4b315e7c1376810c216be2331383be03b56fd116ba06546d1535f75e18faa"} Nov 22 10:42:10 crc kubenswrapper[4926]: I1122 10:42:10.031461 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" event={"ID":"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8","Type":"ContainerStarted","Data":"456795fc558fa5b92dbf1f4f413989fd20ec2e901d07174da8e0f10b53a3b856"} Nov 22 10:42:10 crc kubenswrapper[4926]: I1122 10:42:10.033699 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-h55df" event={"ID":"b95520b5-bae8-4409-9ae4-ad3763092f1c","Type":"ContainerStarted","Data":"bac42beb91e0d0907e25dfd46575f0e572871b487467d792a9f85e382994672a"} Nov 22 10:42:10 crc kubenswrapper[4926]: I1122 10:42:10.034256 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-h55df" Nov 22 10:42:10 crc kubenswrapper[4926]: I1122 10:42:10.044293 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-d4vh5" event={"ID":"c44b98ae-cd26-48e0-9c72-725fd64d22f9","Type":"ContainerStarted","Data":"87903a17cfa5157604e02a5ba69e9bc37674bddfd377eec7f6822727d62dfd59"} Nov 22 10:42:10 crc kubenswrapper[4926]: I1122 10:42:10.045248 4926 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-cm8rb container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.24:8080/healthz\": dial tcp 10.217.0.24:8080: connect: connection refused" start-of-body= Nov 22 10:42:10 crc kubenswrapper[4926]: I1122 10:42:10.045311 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-cm8rb" podUID="4943246c-40df-4927-8380-b7d2804a17f7" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.24:8080/healthz\": dial tcp 10.217.0.24:8080: connect: connection refused" Nov 22 10:42:10 crc kubenswrapper[4926]: I1122 10:42:10.045392 4926 patch_prober.go:28] interesting pod/controller-manager-879f6c89f-c56h5 container/controller-manager namespace/openshift-controller-manager: 
Readiness probe status=failure output="Get \"https://10.217.0.12:8443/healthz\": dial tcp 10.217.0.12:8443: connect: connection refused" start-of-body= Nov 22 10:42:10 crc kubenswrapper[4926]: I1122 10:42:10.045430 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-controller-manager/controller-manager-879f6c89f-c56h5" podUID="fbac19dc-113c-44e6-8744-445e62ea540d" containerName="controller-manager" probeResult="failure" output="Get \"https://10.217.0.12:8443/healthz\": dial tcp 10.217.0.12:8443: connect: connection refused" Nov 22 10:42:10 crc kubenswrapper[4926]: I1122 10:42:10.045991 4926 patch_prober.go:28] interesting pod/packageserver-d55dfcdfc-rwrgv container/packageserver namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.26:5443/healthz\": dial tcp 10.217.0.26:5443: connect: connection refused" start-of-body= Nov 22 10:42:10 crc kubenswrapper[4926]: I1122 10:42:10.046034 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-rwrgv" podUID="3711dcd6-7ec3-4610-857d-e24f38c6e986" containerName="packageserver" probeResult="failure" output="Get \"https://10.217.0.26:5443/healthz\": dial tcp 10.217.0.26:5443: connect: connection refused" Nov 22 10:42:10 crc kubenswrapper[4926]: I1122 10:42:10.046678 4926 patch_prober.go:28] interesting pod/catalog-operator-68c6474976-zmfpm container/catalog-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.40:8443/healthz\": dial tcp 10.217.0.40:8443: connect: connection refused" start-of-body= Nov 22 10:42:10 crc kubenswrapper[4926]: I1122 10:42:10.046701 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-zmfpm" podUID="55c9f174-5a49-4d37-9ce6-8640b252c9c2" containerName="catalog-operator" probeResult="failure" output="Get \"https://10.217.0.40:8443/healthz\": dial tcp 10.217.0.40:8443: connect: connection refused" Nov 22 10:42:10 crc kubenswrapper[4926]: I1122 10:42:10.051950 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console-operator/console-operator-58897d9998-zk2x8" Nov 22 10:42:10 crc kubenswrapper[4926]: I1122 10:42:10.054636 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/multus-admission-controller-857f4d67dd-js7td" podStartSLOduration=130.054617347 podStartE2EDuration="2m10.054617347s" podCreationTimestamp="2025-11-22 10:40:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 10:42:10.053007781 +0000 UTC m=+150.354613068" watchObservedRunningTime="2025-11-22 10:42:10.054617347 +0000 UTC m=+150.356222634" Nov 22 10:42:10 crc kubenswrapper[4926]: I1122 10:42:10.084238 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 22 10:42:10 crc kubenswrapper[4926]: E1122 10:42:10.086465 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. 
No retries permitted until 2025-11-22 10:42:10.586443729 +0000 UTC m=+150.888049056 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 10:42:10 crc kubenswrapper[4926]: I1122 10:42:10.088686 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-qkq57" podStartSLOduration=130.088665742 podStartE2EDuration="2m10.088665742s" podCreationTimestamp="2025-11-22 10:40:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 10:42:10.08822281 +0000 UTC m=+150.389828097" watchObservedRunningTime="2025-11-22 10:42:10.088665742 +0000 UTC m=+150.390271029" Nov 22 10:42:10 crc kubenswrapper[4926]: I1122 10:42:10.113236 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-dns/dns-default-mwqpv" podStartSLOduration=7.113213446 podStartE2EDuration="7.113213446s" podCreationTimestamp="2025-11-22 10:42:03 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 10:42:10.113205955 +0000 UTC m=+150.414811252" watchObservedRunningTime="2025-11-22 10:42:10.113213446 +0000 UTC m=+150.414818733" Nov 22 10:42:10 crc kubenswrapper[4926]: I1122 10:42:10.186172 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-wlnj9\" (UID: \"d7487242-27ca-4f15-8d7f-6a7cf67e8992\") " pod="openshift-image-registry/image-registry-697d97f7c8-wlnj9" Nov 22 10:42:10 crc kubenswrapper[4926]: E1122 10:42:10.186532 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-22 10:42:10.686519635 +0000 UTC m=+150.988124922 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-wlnj9" (UID: "d7487242-27ca-4f15-8d7f-6a7cf67e8992") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 10:42:10 crc kubenswrapper[4926]: I1122 10:42:10.205202 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-t4dgw" podStartSLOduration=130.20518635 podStartE2EDuration="2m10.20518635s" podCreationTimestamp="2025-11-22 10:40:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 10:42:10.169562699 +0000 UTC m=+150.471167986" watchObservedRunningTime="2025-11-22 10:42:10.20518635 +0000 UTC m=+150.506791637" Nov 22 10:42:10 crc kubenswrapper[4926]: I1122 10:42:10.243107 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-7zp2q" podStartSLOduration=130.243090355 podStartE2EDuration="2m10.243090355s" podCreationTimestamp="2025-11-22 10:40:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 10:42:10.20623687 +0000 UTC m=+150.507842157" watchObservedRunningTime="2025-11-22 10:42:10.243090355 +0000 UTC m=+150.544695642" Nov 22 10:42:10 crc kubenswrapper[4926]: I1122 10:42:10.287820 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-api/machine-api-operator-5694c8668f-jkqsx" podStartSLOduration=130.287803156 podStartE2EDuration="2m10.287803156s" podCreationTimestamp="2025-11-22 10:40:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 10:42:10.286833108 +0000 UTC m=+150.588438395" watchObservedRunningTime="2025-11-22 10:42:10.287803156 +0000 UTC m=+150.589408443" Nov 22 10:42:10 crc kubenswrapper[4926]: I1122 10:42:10.288480 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 22 10:42:10 crc kubenswrapper[4926]: E1122 10:42:10.289031 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 10:42:10.789016591 +0000 UTC m=+151.090621878 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 10:42:10 crc kubenswrapper[4926]: I1122 10:42:10.309827 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-h55df" podStartSLOduration=130.309807986 podStartE2EDuration="2m10.309807986s" podCreationTimestamp="2025-11-22 10:40:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 10:42:10.309429506 +0000 UTC m=+150.611034793" watchObservedRunningTime="2025-11-22 10:42:10.309807986 +0000 UTC m=+150.611413273" Nov 22 10:42:10 crc kubenswrapper[4926]: I1122 10:42:10.378871 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-dns-operator/dns-operator-744455d44c-jb7x7" podStartSLOduration=130.378851664 podStartE2EDuration="2m10.378851664s" podCreationTimestamp="2025-11-22 10:40:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 10:42:10.341920676 +0000 UTC m=+150.643525963" watchObservedRunningTime="2025-11-22 10:42:10.378851664 +0000 UTC m=+150.680456951" Nov 22 10:42:10 crc kubenswrapper[4926]: I1122 10:42:10.391044 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-wlnj9\" (UID: \"d7487242-27ca-4f15-8d7f-6a7cf67e8992\") " pod="openshift-image-registry/image-registry-697d97f7c8-wlnj9" Nov 22 10:42:10 crc kubenswrapper[4926]: E1122 10:42:10.391473 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-22 10:42:10.891457515 +0000 UTC m=+151.193062802 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-wlnj9" (UID: "d7487242-27ca-4f15-8d7f-6a7cf67e8992") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 10:42:10 crc kubenswrapper[4926]: I1122 10:42:10.426092 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-apiserver/apiserver-76f77b778f-d4vh5" podStartSLOduration=131.426074997 podStartE2EDuration="2m11.426074997s" podCreationTimestamp="2025-11-22 10:39:59 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 10:42:10.424757919 +0000 UTC m=+150.726363206" watchObservedRunningTime="2025-11-22 10:42:10.426074997 +0000 UTC m=+150.727680284" Nov 22 10:42:10 crc kubenswrapper[4926]: I1122 10:42:10.493106 4926 patch_prober.go:28] interesting pod/router-default-5444994796-8k4sd container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 22 10:42:10 crc kubenswrapper[4926]: [-]has-synced failed: reason withheld Nov 22 10:42:10 crc kubenswrapper[4926]: [+]process-running ok Nov 22 10:42:10 crc kubenswrapper[4926]: healthz check failed Nov 22 10:42:10 crc kubenswrapper[4926]: I1122 10:42:10.493457 4926 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-8k4sd" podUID="12a1d8f7-8b05-42ae-bba2-13bb443eae1e" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 22 10:42:10 crc kubenswrapper[4926]: I1122 10:42:10.494371 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 22 10:42:10 crc kubenswrapper[4926]: E1122 10:42:10.494742 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 10:42:10.994725053 +0000 UTC m=+151.296330340 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 10:42:10 crc kubenswrapper[4926]: I1122 10:42:10.562781 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-7zp2q" Nov 22 10:42:10 crc kubenswrapper[4926]: I1122 10:42:10.563171 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-7zp2q" Nov 22 10:42:10 crc kubenswrapper[4926]: I1122 10:42:10.568476 4926 patch_prober.go:28] interesting pod/apiserver-7bbb656c7d-7zp2q container/oauth-apiserver namespace/openshift-oauth-apiserver: Startup probe status=failure output="Get \"https://10.217.0.7:8443/livez\": dial tcp 10.217.0.7:8443: connect: connection refused" start-of-body= Nov 22 10:42:10 crc kubenswrapper[4926]: I1122 10:42:10.568526 4926 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-7zp2q" podUID="831547cf-3937-4d20-83b8-9570e309f0b3" containerName="oauth-apiserver" probeResult="failure" output="Get \"https://10.217.0.7:8443/livez\": dial tcp 10.217.0.7:8443: connect: connection refused" Nov 22 10:42:10 crc kubenswrapper[4926]: I1122 10:42:10.599206 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-wlnj9\" (UID: \"d7487242-27ca-4f15-8d7f-6a7cf67e8992\") " pod="openshift-image-registry/image-registry-697d97f7c8-wlnj9" Nov 22 10:42:10 crc kubenswrapper[4926]: E1122 10:42:10.599594 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-22 10:42:11.099578936 +0000 UTC m=+151.401184223 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-wlnj9" (UID: "d7487242-27ca-4f15-8d7f-6a7cf67e8992") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 10:42:10 crc kubenswrapper[4926]: I1122 10:42:10.699725 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 22 10:42:10 crc kubenswrapper[4926]: E1122 10:42:10.699828 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. 
No retries permitted until 2025-11-22 10:42:11.199814027 +0000 UTC m=+151.501419314 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 10:42:10 crc kubenswrapper[4926]: I1122 10:42:10.700483 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-wlnj9\" (UID: \"d7487242-27ca-4f15-8d7f-6a7cf67e8992\") " pod="openshift-image-registry/image-registry-697d97f7c8-wlnj9" Nov 22 10:42:10 crc kubenswrapper[4926]: E1122 10:42:10.700551 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-22 10:42:11.200538318 +0000 UTC m=+151.502143615 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-wlnj9" (UID: "d7487242-27ca-4f15-8d7f-6a7cf67e8992") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 10:42:10 crc kubenswrapper[4926]: I1122 10:42:10.725671 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-apiserver/apiserver-76f77b778f-d4vh5" Nov 22 10:42:10 crc kubenswrapper[4926]: I1122 10:42:10.725729 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-apiserver/apiserver-76f77b778f-d4vh5" Nov 22 10:42:10 crc kubenswrapper[4926]: I1122 10:42:10.731339 4926 patch_prober.go:28] interesting pod/apiserver-76f77b778f-d4vh5 container/openshift-apiserver namespace/openshift-apiserver: Startup probe status=failure output="Get \"https://10.217.0.5:8443/livez\": dial tcp 10.217.0.5:8443: connect: connection refused" start-of-body= Nov 22 10:42:10 crc kubenswrapper[4926]: I1122 10:42:10.731404 4926 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-apiserver/apiserver-76f77b778f-d4vh5" podUID="c44b98ae-cd26-48e0-9c72-725fd64d22f9" containerName="openshift-apiserver" probeResult="failure" output="Get \"https://10.217.0.5:8443/livez\": dial tcp 10.217.0.5:8443: connect: connection refused" Nov 22 10:42:10 crc kubenswrapper[4926]: I1122 10:42:10.790173 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-config-operator/openshift-config-operator-7777fb866f-954dx" Nov 22 10:42:10 crc kubenswrapper[4926]: I1122 10:42:10.801389 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 22 10:42:10 crc 
kubenswrapper[4926]: E1122 10:42:10.801602 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 10:42:11.301572052 +0000 UTC m=+151.603177349 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 10:42:10 crc kubenswrapper[4926]: I1122 10:42:10.801856 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-wlnj9\" (UID: \"d7487242-27ca-4f15-8d7f-6a7cf67e8992\") " pod="openshift-image-registry/image-registry-697d97f7c8-wlnj9" Nov 22 10:42:10 crc kubenswrapper[4926]: E1122 10:42:10.802302 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-22 10:42:11.302273652 +0000 UTC m=+151.603878939 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-wlnj9" (UID: "d7487242-27ca-4f15-8d7f-6a7cf67e8992") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 10:42:10 crc kubenswrapper[4926]: I1122 10:42:10.903007 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 22 10:42:10 crc kubenswrapper[4926]: E1122 10:42:10.903204 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 10:42:11.403172622 +0000 UTC m=+151.704777919 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 10:42:10 crc kubenswrapper[4926]: I1122 10:42:10.903338 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-wlnj9\" (UID: \"d7487242-27ca-4f15-8d7f-6a7cf67e8992\") " pod="openshift-image-registry/image-registry-697d97f7c8-wlnj9" Nov 22 10:42:10 crc kubenswrapper[4926]: E1122 10:42:10.903676 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-22 10:42:11.403662576 +0000 UTC m=+151.705267863 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-wlnj9" (UID: "d7487242-27ca-4f15-8d7f-6a7cf67e8992") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 10:42:11 crc kubenswrapper[4926]: I1122 10:42:11.004508 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 22 10:42:11 crc kubenswrapper[4926]: E1122 10:42:11.004705 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 10:42:11.504672579 +0000 UTC m=+151.806277866 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 10:42:11 crc kubenswrapper[4926]: I1122 10:42:11.004953 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-wlnj9\" (UID: \"d7487242-27ca-4f15-8d7f-6a7cf67e8992\") " pod="openshift-image-registry/image-registry-697d97f7c8-wlnj9" Nov 22 10:42:11 crc kubenswrapper[4926]: E1122 10:42:11.005301 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-22 10:42:11.505291877 +0000 UTC m=+151.806897164 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-wlnj9" (UID: "d7487242-27ca-4f15-8d7f-6a7cf67e8992") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 10:42:11 crc kubenswrapper[4926]: I1122 10:42:11.050077 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-d4vh5" event={"ID":"c44b98ae-cd26-48e0-9c72-725fd64d22f9","Type":"ContainerStarted","Data":"9dd7044f593eac300691201dcdc1d902aa8d25fcbeda9986e495b83449c9fff1"} Nov 22 10:42:11 crc kubenswrapper[4926]: I1122 10:42:11.051807 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-p5j7r" event={"ID":"5a5a2c07-33d5-4376-bee5-375341146a78","Type":"ContainerStarted","Data":"500c9f596bbe669c3c21a6d3e9e5ffcb5f6b65371b164c161e65687125febc7f"} Nov 22 10:42:11 crc kubenswrapper[4926]: I1122 10:42:11.052620 4926 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-cm8rb container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.24:8080/healthz\": dial tcp 10.217.0.24:8080: connect: connection refused" start-of-body= Nov 22 10:42:11 crc kubenswrapper[4926]: I1122 10:42:11.052664 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-cm8rb" podUID="4943246c-40df-4927-8380-b7d2804a17f7" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.24:8080/healthz\": dial tcp 10.217.0.24:8080: connect: connection refused" Nov 22 10:42:11 crc kubenswrapper[4926]: I1122 10:42:11.092209 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-zmfpm" Nov 22 10:42:11 crc kubenswrapper[4926]: I1122 10:42:11.106077 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: 
\"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 22 10:42:11 crc kubenswrapper[4926]: E1122 10:42:11.107476 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 10:42:11.607451843 +0000 UTC m=+151.909057130 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 10:42:11 crc kubenswrapper[4926]: I1122 10:42:11.207831 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-wlnj9\" (UID: \"d7487242-27ca-4f15-8d7f-6a7cf67e8992\") " pod="openshift-image-registry/image-registry-697d97f7c8-wlnj9" Nov 22 10:42:11 crc kubenswrapper[4926]: E1122 10:42:11.208135 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-22 10:42:11.708123846 +0000 UTC m=+152.009729133 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-wlnj9" (UID: "d7487242-27ca-4f15-8d7f-6a7cf67e8992") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 10:42:11 crc kubenswrapper[4926]: I1122 10:42:11.308581 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 22 10:42:11 crc kubenswrapper[4926]: E1122 10:42:11.308763 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 10:42:11.808739428 +0000 UTC m=+152.110344715 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 10:42:11 crc kubenswrapper[4926]: I1122 10:42:11.308812 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-wlnj9\" (UID: \"d7487242-27ca-4f15-8d7f-6a7cf67e8992\") " pod="openshift-image-registry/image-registry-697d97f7c8-wlnj9" Nov 22 10:42:11 crc kubenswrapper[4926]: E1122 10:42:11.309113 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-22 10:42:11.809103288 +0000 UTC m=+152.110708575 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-wlnj9" (UID: "d7487242-27ca-4f15-8d7f-6a7cf67e8992") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 10:42:11 crc kubenswrapper[4926]: I1122 10:42:11.409372 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 22 10:42:11 crc kubenswrapper[4926]: E1122 10:42:11.409526 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 10:42:11.909504904 +0000 UTC m=+152.211110191 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 10:42:11 crc kubenswrapper[4926]: I1122 10:42:11.409569 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-wlnj9\" (UID: \"d7487242-27ca-4f15-8d7f-6a7cf67e8992\") " pod="openshift-image-registry/image-registry-697d97f7c8-wlnj9" Nov 22 10:42:11 crc kubenswrapper[4926]: E1122 10:42:11.409859 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-22 10:42:11.909848534 +0000 UTC m=+152.211453821 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-wlnj9" (UID: "d7487242-27ca-4f15-8d7f-6a7cf67e8992") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 10:42:11 crc kubenswrapper[4926]: I1122 10:42:11.479247 4926 patch_prober.go:28] interesting pod/router-default-5444994796-8k4sd container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 22 10:42:11 crc kubenswrapper[4926]: [-]has-synced failed: reason withheld Nov 22 10:42:11 crc kubenswrapper[4926]: [+]process-running ok Nov 22 10:42:11 crc kubenswrapper[4926]: healthz check failed Nov 22 10:42:11 crc kubenswrapper[4926]: I1122 10:42:11.479301 4926 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-8k4sd" podUID="12a1d8f7-8b05-42ae-bba2-13bb443eae1e" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 22 10:42:11 crc kubenswrapper[4926]: I1122 10:42:11.511211 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 22 10:42:11 crc kubenswrapper[4926]: E1122 10:42:11.513247 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 10:42:12.013217395 +0000 UTC m=+152.314822682 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 10:42:11 crc kubenswrapper[4926]: I1122 10:42:11.526858 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-rwrgv" Nov 22 10:42:11 crc kubenswrapper[4926]: I1122 10:42:11.615527 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-wlnj9\" (UID: \"d7487242-27ca-4f15-8d7f-6a7cf67e8992\") " pod="openshift-image-registry/image-registry-697d97f7c8-wlnj9" Nov 22 10:42:11 crc kubenswrapper[4926]: E1122 10:42:11.615970 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-22 10:42:12.115954587 +0000 UTC m=+152.417559874 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-wlnj9" (UID: "d7487242-27ca-4f15-8d7f-6a7cf67e8992") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 10:42:11 crc kubenswrapper[4926]: I1122 10:42:11.717048 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 22 10:42:11 crc kubenswrapper[4926]: E1122 10:42:11.717280 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 10:42:12.217253929 +0000 UTC m=+152.518859216 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 22 10:42:11 crc kubenswrapper[4926]: I1122 10:42:11.717337 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-wlnj9\" (UID: \"d7487242-27ca-4f15-8d7f-6a7cf67e8992\") " pod="openshift-image-registry/image-registry-697d97f7c8-wlnj9"
Nov 22 10:42:11 crc kubenswrapper[4926]: E1122 10:42:11.717657 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-22 10:42:12.21764928 +0000 UTC m=+152.519254567 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-wlnj9" (UID: "d7487242-27ca-4f15-8d7f-6a7cf67e8992") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 22 10:42:11 crc kubenswrapper[4926]: I1122 10:42:11.818614 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 22 10:42:11 crc kubenswrapper[4926]: E1122 10:42:11.818860 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 10:42:12.318826128 +0000 UTC m=+152.620431415 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 22 10:42:11 crc kubenswrapper[4926]: I1122 10:42:11.819033 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-wlnj9\" (UID: \"d7487242-27ca-4f15-8d7f-6a7cf67e8992\") " pod="openshift-image-registry/image-registry-697d97f7c8-wlnj9"
Nov 22 10:42:11 crc kubenswrapper[4926]: E1122 10:42:11.819465 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-22 10:42:12.319448926 +0000 UTC m=+152.621054213 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-wlnj9" (UID: "d7487242-27ca-4f15-8d7f-6a7cf67e8992") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 22 10:42:11 crc kubenswrapper[4926]: I1122 10:42:11.920800 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 22 10:42:11 crc kubenswrapper[4926]: E1122 10:42:11.921028 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 10:42:12.421000804 +0000 UTC m=+152.722606091 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 22 10:42:11 crc kubenswrapper[4926]: I1122 10:42:11.921125 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-wlnj9\" (UID: \"d7487242-27ca-4f15-8d7f-6a7cf67e8992\") " pod="openshift-image-registry/image-registry-697d97f7c8-wlnj9"
Nov 22 10:42:11 crc kubenswrapper[4926]: E1122 10:42:11.921463 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-22 10:42:12.421453127 +0000 UTC m=+152.723058604 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-wlnj9" (UID: "d7487242-27ca-4f15-8d7f-6a7cf67e8992") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 22 10:42:12 crc kubenswrapper[4926]: I1122 10:42:12.022285 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 22 10:42:12 crc kubenswrapper[4926]: E1122 10:42:12.022467 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 10:42:12.52244318 +0000 UTC m=+152.824048467 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 22 10:42:12 crc kubenswrapper[4926]: I1122 10:42:12.022702 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-wlnj9\" (UID: \"d7487242-27ca-4f15-8d7f-6a7cf67e8992\") " pod="openshift-image-registry/image-registry-697d97f7c8-wlnj9"
Nov 22 10:42:12 crc kubenswrapper[4926]: E1122 10:42:12.023042 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-22 10:42:12.523034657 +0000 UTC m=+152.824639944 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-wlnj9" (UID: "d7487242-27ca-4f15-8d7f-6a7cf67e8992") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 22 10:42:12 crc kubenswrapper[4926]: I1122 10:42:12.057502 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-p5j7r" event={"ID":"5a5a2c07-33d5-4376-bee5-375341146a78","Type":"ContainerStarted","Data":"76928d6453fd20d58cc7caeda82c032ee6b9376d9eb5931f7555d79645a14592"}
Nov 22 10:42:12 crc kubenswrapper[4926]: I1122 10:42:12.124003 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 22 10:42:12 crc kubenswrapper[4926]: E1122 10:42:12.124231 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 10:42:12.624204704 +0000 UTC m=+152.925809991 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 22 10:42:12 crc kubenswrapper[4926]: I1122 10:42:12.124315 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-wlnj9\" (UID: \"d7487242-27ca-4f15-8d7f-6a7cf67e8992\") " pod="openshift-image-registry/image-registry-697d97f7c8-wlnj9"
Nov 22 10:42:12 crc kubenswrapper[4926]: E1122 10:42:12.124623 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-22 10:42:12.624614315 +0000 UTC m=+152.926219602 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-wlnj9" (UID: "d7487242-27ca-4f15-8d7f-6a7cf67e8992") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 22 10:42:12 crc kubenswrapper[4926]: I1122 10:42:12.223850 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-4864b"]
Nov 22 10:42:12 crc kubenswrapper[4926]: I1122 10:42:12.224675 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-4864b"
Nov 22 10:42:12 crc kubenswrapper[4926]: I1122 10:42:12.226494 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"community-operators-dockercfg-dmngl"
Nov 22 10:42:12 crc kubenswrapper[4926]: I1122 10:42:12.226975 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 22 10:42:12 crc kubenswrapper[4926]: E1122 10:42:12.227139 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 10:42:12.727107881 +0000 UTC m=+153.028713168 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 22 10:42:12 crc kubenswrapper[4926]: I1122 10:42:12.227376 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-wlnj9\" (UID: \"d7487242-27ca-4f15-8d7f-6a7cf67e8992\") " pod="openshift-image-registry/image-registry-697d97f7c8-wlnj9"
Nov 22 10:42:12 crc kubenswrapper[4926]: E1122 10:42:12.227674 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-22 10:42:12.727664057 +0000 UTC m=+153.029269344 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-wlnj9" (UID: "d7487242-27ca-4f15-8d7f-6a7cf67e8992") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 22 10:42:12 crc kubenswrapper[4926]: I1122 10:42:12.242698 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-4864b"]
Nov 22 10:42:12 crc kubenswrapper[4926]: I1122 10:42:12.248379 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-c56h5"]
Nov 22 10:42:12 crc kubenswrapper[4926]: I1122 10:42:12.248586 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-controller-manager/controller-manager-879f6c89f-c56h5" podUID="fbac19dc-113c-44e6-8744-445e62ea540d" containerName="controller-manager" containerID="cri-o://7d2a23f72bda1f20c8bbb73708aa680bd01b6779943dc1b6b4626f6187b74635" gracePeriod=30
Nov 22 10:42:12 crc kubenswrapper[4926]: I1122 10:42:12.309422 4926 patch_prober.go:28] interesting pod/controller-manager-879f6c89f-c56h5 container/controller-manager namespace/openshift-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.12:8443/healthz\": EOF" start-of-body=
Nov 22 10:42:12 crc kubenswrapper[4926]: I1122 10:42:12.309477 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-controller-manager/controller-manager-879f6c89f-c56h5" podUID="fbac19dc-113c-44e6-8744-445e62ea540d" containerName="controller-manager" probeResult="failure" output="Get \"https://10.217.0.12:8443/healthz\": EOF"
Nov 22 10:42:12 crc kubenswrapper[4926]: I1122 10:42:12.328806 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 22 10:42:12 crc kubenswrapper[4926]: E1122 10:42:12.328990 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 10:42:12.828967038 +0000 UTC m=+153.130572325 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 22 10:42:12 crc kubenswrapper[4926]: I1122 10:42:12.329100 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c34f3883-c9b2-41fe-9a17-127918e9ef88-catalog-content\") pod \"community-operators-4864b\" (UID: \"c34f3883-c9b2-41fe-9a17-127918e9ef88\") " pod="openshift-marketplace/community-operators-4864b"
Nov 22 10:42:12 crc kubenswrapper[4926]: I1122 10:42:12.329157 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c34f3883-c9b2-41fe-9a17-127918e9ef88-utilities\") pod \"community-operators-4864b\" (UID: \"c34f3883-c9b2-41fe-9a17-127918e9ef88\") " pod="openshift-marketplace/community-operators-4864b"
Nov 22 10:42:12 crc kubenswrapper[4926]: I1122 10:42:12.329204 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-wlnj9\" (UID: \"d7487242-27ca-4f15-8d7f-6a7cf67e8992\") " pod="openshift-image-registry/image-registry-697d97f7c8-wlnj9"
Nov 22 10:42:12 crc kubenswrapper[4926]: I1122 10:42:12.329264 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kgbtd\" (UniqueName: \"kubernetes.io/projected/c34f3883-c9b2-41fe-9a17-127918e9ef88-kube-api-access-kgbtd\") pod \"community-operators-4864b\" (UID: \"c34f3883-c9b2-41fe-9a17-127918e9ef88\") " pod="openshift-marketplace/community-operators-4864b"
Nov 22 10:42:12 crc kubenswrapper[4926]: E1122 10:42:12.329497 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-22 10:42:12.829489883 +0000 UTC m=+153.131095170 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-wlnj9" (UID: "d7487242-27ca-4f15-8d7f-6a7cf67e8992") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 22 10:42:12 crc kubenswrapper[4926]: I1122 10:42:12.416855 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-c2r2n"]
Nov 22 10:42:12 crc kubenswrapper[4926]: I1122 10:42:12.417737 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-c2r2n"
Nov 22 10:42:12 crc kubenswrapper[4926]: I1122 10:42:12.421222 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"certified-operators-dockercfg-4rs5g"
Nov 22 10:42:12 crc kubenswrapper[4926]: I1122 10:42:12.431601 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 22 10:42:12 crc kubenswrapper[4926]: I1122 10:42:12.431930 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kgbtd\" (UniqueName: \"kubernetes.io/projected/c34f3883-c9b2-41fe-9a17-127918e9ef88-kube-api-access-kgbtd\") pod \"community-operators-4864b\" (UID: \"c34f3883-c9b2-41fe-9a17-127918e9ef88\") " pod="openshift-marketplace/community-operators-4864b"
Nov 22 10:42:12 crc kubenswrapper[4926]: I1122 10:42:12.432021 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c34f3883-c9b2-41fe-9a17-127918e9ef88-catalog-content\") pod \"community-operators-4864b\" (UID: \"c34f3883-c9b2-41fe-9a17-127918e9ef88\") " pod="openshift-marketplace/community-operators-4864b"
Nov 22 10:42:12 crc kubenswrapper[4926]: I1122 10:42:12.432070 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c34f3883-c9b2-41fe-9a17-127918e9ef88-utilities\") pod \"community-operators-4864b\" (UID: \"c34f3883-c9b2-41fe-9a17-127918e9ef88\") " pod="openshift-marketplace/community-operators-4864b"
Nov 22 10:42:12 crc kubenswrapper[4926]: I1122 10:42:12.432571 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c34f3883-c9b2-41fe-9a17-127918e9ef88-utilities\") pod \"community-operators-4864b\" (UID: \"c34f3883-c9b2-41fe-9a17-127918e9ef88\") " pod="openshift-marketplace/community-operators-4864b"
Nov 22 10:42:12 crc kubenswrapper[4926]: E1122 10:42:12.432663 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 10:42:12.932646428 +0000 UTC m=+153.234251715 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 22 10:42:12 crc kubenswrapper[4926]: I1122 10:42:12.433343 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c34f3883-c9b2-41fe-9a17-127918e9ef88-catalog-content\") pod \"community-operators-4864b\" (UID: \"c34f3883-c9b2-41fe-9a17-127918e9ef88\") " pod="openshift-marketplace/community-operators-4864b"
Nov 22 10:42:12 crc kubenswrapper[4926]: I1122 10:42:12.445737 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-c2r2n"]
Nov 22 10:42:12 crc kubenswrapper[4926]: I1122 10:42:12.487199 4926 patch_prober.go:28] interesting pod/router-default-5444994796-8k4sd container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Nov 22 10:42:12 crc kubenswrapper[4926]: [-]has-synced failed: reason withheld
Nov 22 10:42:12 crc kubenswrapper[4926]: [+]process-running ok
Nov 22 10:42:12 crc kubenswrapper[4926]: healthz check failed
Nov 22 10:42:12 crc kubenswrapper[4926]: I1122 10:42:12.487441 4926 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-8k4sd" podUID="12a1d8f7-8b05-42ae-bba2-13bb443eae1e" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Nov 22 10:42:12 crc kubenswrapper[4926]: I1122 10:42:12.507818 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kgbtd\" (UniqueName: \"kubernetes.io/projected/c34f3883-c9b2-41fe-9a17-127918e9ef88-kube-api-access-kgbtd\") pod \"community-operators-4864b\" (UID: \"c34f3883-c9b2-41fe-9a17-127918e9ef88\") " pod="openshift-marketplace/community-operators-4864b"
Nov 22 10:42:12 crc kubenswrapper[4926]: I1122 10:42:12.534622 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ms456\" (UniqueName: \"kubernetes.io/projected/b8674be0-e053-4d83-9a04-008800542315-kube-api-access-ms456\") pod \"certified-operators-c2r2n\" (UID: \"b8674be0-e053-4d83-9a04-008800542315\") " pod="openshift-marketplace/certified-operators-c2r2n"
Nov 22 10:42:12 crc kubenswrapper[4926]: I1122 10:42:12.534687 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b8674be0-e053-4d83-9a04-008800542315-utilities\") pod \"certified-operators-c2r2n\" (UID: \"b8674be0-e053-4d83-9a04-008800542315\") " pod="openshift-marketplace/certified-operators-c2r2n"
Nov 22 10:42:12 crc kubenswrapper[4926]: I1122 10:42:12.534711 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b8674be0-e053-4d83-9a04-008800542315-catalog-content\") pod \"certified-operators-c2r2n\" (UID: \"b8674be0-e053-4d83-9a04-008800542315\") " pod="openshift-marketplace/certified-operators-c2r2n"
Nov 22 10:42:12 crc kubenswrapper[4926]: I1122 10:42:12.534745 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-wlnj9\" (UID: \"d7487242-27ca-4f15-8d7f-6a7cf67e8992\") " pod="openshift-image-registry/image-registry-697d97f7c8-wlnj9"
Nov 22 10:42:12 crc kubenswrapper[4926]: E1122 10:42:12.535001 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-22 10:42:13.034991049 +0000 UTC m=+153.336596336 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-wlnj9" (UID: "d7487242-27ca-4f15-8d7f-6a7cf67e8992") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 22 10:42:12 crc kubenswrapper[4926]: I1122 10:42:12.539258 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-4864b"
Nov 22 10:42:12 crc kubenswrapper[4926]: I1122 10:42:12.604494 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-s7kfj"]
Nov 22 10:42:12 crc kubenswrapper[4926]: I1122 10:42:12.605362 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-s7kfj"
Nov 22 10:42:12 crc kubenswrapper[4926]: I1122 10:42:12.615758 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-s7kfj"]
Nov 22 10:42:12 crc kubenswrapper[4926]: I1122 10:42:12.636376 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 22 10:42:12 crc kubenswrapper[4926]: I1122 10:42:12.636667 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-n9sjl\" (UniqueName: \"kubernetes.io/projected/4ada1ec1-1f41-40d3-a36d-3de32bb19c9c-kube-api-access-n9sjl\") pod \"community-operators-s7kfj\" (UID: \"4ada1ec1-1f41-40d3-a36d-3de32bb19c9c\") " pod="openshift-marketplace/community-operators-s7kfj"
Nov 22 10:42:12 crc kubenswrapper[4926]: I1122 10:42:12.636731 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ms456\" (UniqueName: \"kubernetes.io/projected/b8674be0-e053-4d83-9a04-008800542315-kube-api-access-ms456\") pod \"certified-operators-c2r2n\" (UID: \"b8674be0-e053-4d83-9a04-008800542315\") " pod="openshift-marketplace/certified-operators-c2r2n"
Nov 22 10:42:12 crc kubenswrapper[4926]: I1122 10:42:12.636789 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b8674be0-e053-4d83-9a04-008800542315-utilities\") pod \"certified-operators-c2r2n\" (UID: \"b8674be0-e053-4d83-9a04-008800542315\") " pod="openshift-marketplace/certified-operators-c2r2n"
Nov 22 10:42:12 crc kubenswrapper[4926]: I1122 10:42:12.636820 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b8674be0-e053-4d83-9a04-008800542315-catalog-content\") pod \"certified-operators-c2r2n\" (UID: \"b8674be0-e053-4d83-9a04-008800542315\") " pod="openshift-marketplace/certified-operators-c2r2n"
Nov 22 10:42:12 crc kubenswrapper[4926]: I1122 10:42:12.636879 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4ada1ec1-1f41-40d3-a36d-3de32bb19c9c-catalog-content\") pod \"community-operators-s7kfj\" (UID: \"4ada1ec1-1f41-40d3-a36d-3de32bb19c9c\") " pod="openshift-marketplace/community-operators-s7kfj"
Nov 22 10:42:12 crc kubenswrapper[4926]: I1122 10:42:12.636930 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4ada1ec1-1f41-40d3-a36d-3de32bb19c9c-utilities\") pod \"community-operators-s7kfj\" (UID: \"4ada1ec1-1f41-40d3-a36d-3de32bb19c9c\") " pod="openshift-marketplace/community-operators-s7kfj"
Nov 22 10:42:12 crc kubenswrapper[4926]: E1122 10:42:12.637130 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 10:42:13.137111284 +0000 UTC m=+153.438716571 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 22 10:42:12 crc kubenswrapper[4926]: I1122 10:42:12.638291 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b8674be0-e053-4d83-9a04-008800542315-utilities\") pod \"certified-operators-c2r2n\" (UID: \"b8674be0-e053-4d83-9a04-008800542315\") " pod="openshift-marketplace/certified-operators-c2r2n"
Nov 22 10:42:12 crc kubenswrapper[4926]: I1122 10:42:12.638337 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b8674be0-e053-4d83-9a04-008800542315-catalog-content\") pod \"certified-operators-c2r2n\" (UID: \"b8674be0-e053-4d83-9a04-008800542315\") " pod="openshift-marketplace/certified-operators-c2r2n"
Nov 22 10:42:12 crc kubenswrapper[4926]: I1122 10:42:12.655820 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ms456\" (UniqueName: \"kubernetes.io/projected/b8674be0-e053-4d83-9a04-008800542315-kube-api-access-ms456\") pod \"certified-operators-c2r2n\" (UID: \"b8674be0-e053-4d83-9a04-008800542315\") " pod="openshift-marketplace/certified-operators-c2r2n"
Nov 22 10:42:12 crc kubenswrapper[4926]: I1122 10:42:12.739406 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-wlnj9\" (UID: \"d7487242-27ca-4f15-8d7f-6a7cf67e8992\") " pod="openshift-image-registry/image-registry-697d97f7c8-wlnj9"
Nov 22 10:42:12 crc kubenswrapper[4926]: I1122 10:42:12.739650 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4ada1ec1-1f41-40d3-a36d-3de32bb19c9c-catalog-content\") pod \"community-operators-s7kfj\" (UID: \"4ada1ec1-1f41-40d3-a36d-3de32bb19c9c\") " pod="openshift-marketplace/community-operators-s7kfj"
Nov 22 10:42:12 crc kubenswrapper[4926]: I1122 10:42:12.739670 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4ada1ec1-1f41-40d3-a36d-3de32bb19c9c-utilities\") pod \"community-operators-s7kfj\" (UID: \"4ada1ec1-1f41-40d3-a36d-3de32bb19c9c\") " pod="openshift-marketplace/community-operators-s7kfj"
Nov 22 10:42:12 crc kubenswrapper[4926]: I1122 10:42:12.739704 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-n9sjl\" (UniqueName: \"kubernetes.io/projected/4ada1ec1-1f41-40d3-a36d-3de32bb19c9c-kube-api-access-n9sjl\") pod \"community-operators-s7kfj\" (UID: \"4ada1ec1-1f41-40d3-a36d-3de32bb19c9c\") " pod="openshift-marketplace/community-operators-s7kfj"
Nov 22 10:42:12 crc kubenswrapper[4926]: E1122 10:42:12.740540 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-22 10:42:13.240526316 +0000 UTC m=+153.542131603 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-wlnj9" (UID: "d7487242-27ca-4f15-8d7f-6a7cf67e8992") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 22 10:42:12 crc kubenswrapper[4926]: I1122 10:42:12.741858 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4ada1ec1-1f41-40d3-a36d-3de32bb19c9c-catalog-content\") pod \"community-operators-s7kfj\" (UID: \"4ada1ec1-1f41-40d3-a36d-3de32bb19c9c\") " pod="openshift-marketplace/community-operators-s7kfj"
Nov 22 10:42:12 crc kubenswrapper[4926]: I1122 10:42:12.741994 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4ada1ec1-1f41-40d3-a36d-3de32bb19c9c-utilities\") pod \"community-operators-s7kfj\" (UID: \"4ada1ec1-1f41-40d3-a36d-3de32bb19c9c\") " pod="openshift-marketplace/community-operators-s7kfj"
Nov 22 10:42:12 crc kubenswrapper[4926]: I1122 10:42:12.759906 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-n9sjl\" (UniqueName: \"kubernetes.io/projected/4ada1ec1-1f41-40d3-a36d-3de32bb19c9c-kube-api-access-n9sjl\") pod \"community-operators-s7kfj\" (UID: \"4ada1ec1-1f41-40d3-a36d-3de32bb19c9c\") " pod="openshift-marketplace/community-operators-s7kfj"
Nov 22 10:42:12 crc kubenswrapper[4926]: I1122 10:42:12.766604 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-c2r2n"
Nov 22 10:42:12 crc kubenswrapper[4926]: I1122 10:42:12.803313 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-2vvkj"]
Nov 22 10:42:12 crc kubenswrapper[4926]: I1122 10:42:12.804230 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-2vvkj"
Nov 22 10:42:12 crc kubenswrapper[4926]: I1122 10:42:12.806390 4926 plugin_watcher.go:194] "Adding socket path or updating timestamp to desired state cache" path="/var/lib/kubelet/plugins_registry/kubevirt.io.hostpath-provisioner-reg.sock"
Nov 22 10:42:12 crc kubenswrapper[4926]: I1122 10:42:12.833405 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-2vvkj"]
Nov 22 10:42:12 crc kubenswrapper[4926]: I1122 10:42:12.844017 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 22 10:42:12 crc kubenswrapper[4926]: I1122 10:42:12.844691 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f0547d70-c2f4-4d26-ad73-9f65df8b1bc1-catalog-content\") pod \"certified-operators-2vvkj\" (UID: \"f0547d70-c2f4-4d26-ad73-9f65df8b1bc1\") " pod="openshift-marketplace/certified-operators-2vvkj"
Nov 22 10:42:12 crc kubenswrapper[4926]: I1122 10:42:12.844821 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vm4gs\" (UniqueName: \"kubernetes.io/projected/f0547d70-c2f4-4d26-ad73-9f65df8b1bc1-kube-api-access-vm4gs\") pod \"certified-operators-2vvkj\" (UID: \"f0547d70-c2f4-4d26-ad73-9f65df8b1bc1\") " pod="openshift-marketplace/certified-operators-2vvkj"
Nov 22 10:42:12 crc kubenswrapper[4926]: E1122 10:42:12.846693 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 10:42:13.344914396 +0000 UTC m=+153.646519693 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 22 10:42:12 crc kubenswrapper[4926]: I1122 10:42:12.846749 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-wlnj9\" (UID: \"d7487242-27ca-4f15-8d7f-6a7cf67e8992\") " pod="openshift-image-registry/image-registry-697d97f7c8-wlnj9"
Nov 22 10:42:12 crc kubenswrapper[4926]: I1122 10:42:12.846850 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f0547d70-c2f4-4d26-ad73-9f65df8b1bc1-utilities\") pod \"certified-operators-2vvkj\" (UID: \"f0547d70-c2f4-4d26-ad73-9f65df8b1bc1\") " pod="openshift-marketplace/certified-operators-2vvkj"
Nov 22 10:42:12 crc kubenswrapper[4926]: E1122 10:42:12.848215 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-22 10:42:13.34820137 +0000 UTC m=+153.649806657 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-wlnj9" (UID: "d7487242-27ca-4f15-8d7f-6a7cf67e8992") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 22 10:42:12 crc kubenswrapper[4926]: I1122 10:42:12.911372 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-c56h5"
Nov 22 10:42:12 crc kubenswrapper[4926]: I1122 10:42:12.928746 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-s7kfj"
Nov 22 10:42:12 crc kubenswrapper[4926]: I1122 10:42:12.950592 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/fbac19dc-113c-44e6-8744-445e62ea540d-config\") pod \"fbac19dc-113c-44e6-8744-445e62ea540d\" (UID: \"fbac19dc-113c-44e6-8744-445e62ea540d\") "
Nov 22 10:42:12 crc kubenswrapper[4926]: I1122 10:42:12.950815 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-j696v\" (UniqueName: \"kubernetes.io/projected/fbac19dc-113c-44e6-8744-445e62ea540d-kube-api-access-j696v\") pod \"fbac19dc-113c-44e6-8744-445e62ea540d\" (UID: \"fbac19dc-113c-44e6-8744-445e62ea540d\") "
Nov 22 10:42:12 crc kubenswrapper[4926]: I1122 10:42:12.950873 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/fbac19dc-113c-44e6-8744-445e62ea540d-serving-cert\") pod \"fbac19dc-113c-44e6-8744-445e62ea540d\" (UID: \"fbac19dc-113c-44e6-8744-445e62ea540d\") "
Nov 22 10:42:12 crc kubenswrapper[4926]: I1122 10:42:12.950965 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/fbac19dc-113c-44e6-8744-445e62ea540d-proxy-ca-bundles\") pod \"fbac19dc-113c-44e6-8744-445e62ea540d\" (UID: \"fbac19dc-113c-44e6-8744-445e62ea540d\") "
Nov 22 10:42:12 crc kubenswrapper[4926]: I1122 10:42:12.951049 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 22 10:42:12 crc kubenswrapper[4926]: I1122 10:42:12.951093 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/fbac19dc-113c-44e6-8744-445e62ea540d-client-ca\") pod \"fbac19dc-113c-44e6-8744-445e62ea540d\" (UID: \"fbac19dc-113c-44e6-8744-445e62ea540d\") "
Nov 22 10:42:12 crc kubenswrapper[4926]: I1122 10:42:12.951262 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f0547d70-c2f4-4d26-ad73-9f65df8b1bc1-utilities\") pod \"certified-operators-2vvkj\" (UID: \"f0547d70-c2f4-4d26-ad73-9f65df8b1bc1\") " pod="openshift-marketplace/certified-operators-2vvkj"
Nov 22 10:42:12 crc kubenswrapper[4926]: I1122 10:42:12.951304 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f0547d70-c2f4-4d26-ad73-9f65df8b1bc1-catalog-content\") pod \"certified-operators-2vvkj\" (UID: \"f0547d70-c2f4-4d26-ad73-9f65df8b1bc1\") " pod="openshift-marketplace/certified-operators-2vvkj"
Nov 22 10:42:12 crc kubenswrapper[4926]: I1122 10:42:12.951322 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vm4gs\" (UniqueName: \"kubernetes.io/projected/f0547d70-c2f4-4d26-ad73-9f65df8b1bc1-kube-api-access-vm4gs\") pod \"certified-operators-2vvkj\" (UID: \"f0547d70-c2f4-4d26-ad73-9f65df8b1bc1\") " pod="openshift-marketplace/certified-operators-2vvkj"
Nov 22 10:42:12 crc kubenswrapper[4926]: I1122 10:42:12.952358 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fbac19dc-113c-44e6-8744-445e62ea540d-config" (OuterVolumeSpecName: "config") pod "fbac19dc-113c-44e6-8744-445e62ea540d" (UID: "fbac19dc-113c-44e6-8744-445e62ea540d"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
volume "kubernetes.io/configmap/fbac19dc-113c-44e6-8744-445e62ea540d-config" (OuterVolumeSpecName: "config") pod "fbac19dc-113c-44e6-8744-445e62ea540d" (UID: "fbac19dc-113c-44e6-8744-445e62ea540d"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:42:12 crc kubenswrapper[4926]: E1122 10:42:12.953928 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 10:42:13.453900037 +0000 UTC m=+153.755505324 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 10:42:12 crc kubenswrapper[4926]: I1122 10:42:12.954875 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f0547d70-c2f4-4d26-ad73-9f65df8b1bc1-utilities\") pod \"certified-operators-2vvkj\" (UID: \"f0547d70-c2f4-4d26-ad73-9f65df8b1bc1\") " pod="openshift-marketplace/certified-operators-2vvkj" Nov 22 10:42:12 crc kubenswrapper[4926]: I1122 10:42:12.955186 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fbac19dc-113c-44e6-8744-445e62ea540d-client-ca" (OuterVolumeSpecName: "client-ca") pod "fbac19dc-113c-44e6-8744-445e62ea540d" (UID: "fbac19dc-113c-44e6-8744-445e62ea540d"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:42:12 crc kubenswrapper[4926]: I1122 10:42:12.955516 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f0547d70-c2f4-4d26-ad73-9f65df8b1bc1-catalog-content\") pod \"certified-operators-2vvkj\" (UID: \"f0547d70-c2f4-4d26-ad73-9f65df8b1bc1\") " pod="openshift-marketplace/certified-operators-2vvkj" Nov 22 10:42:12 crc kubenswrapper[4926]: I1122 10:42:12.955534 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fbac19dc-113c-44e6-8744-445e62ea540d-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "fbac19dc-113c-44e6-8744-445e62ea540d" (UID: "fbac19dc-113c-44e6-8744-445e62ea540d"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:42:12 crc kubenswrapper[4926]: I1122 10:42:12.958309 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fbac19dc-113c-44e6-8744-445e62ea540d-kube-api-access-j696v" (OuterVolumeSpecName: "kube-api-access-j696v") pod "fbac19dc-113c-44e6-8744-445e62ea540d" (UID: "fbac19dc-113c-44e6-8744-445e62ea540d"). InnerVolumeSpecName "kube-api-access-j696v". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:42:12 crc kubenswrapper[4926]: I1122 10:42:12.958485 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fbac19dc-113c-44e6-8744-445e62ea540d-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "fbac19dc-113c-44e6-8744-445e62ea540d" (UID: "fbac19dc-113c-44e6-8744-445e62ea540d"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:42:12 crc kubenswrapper[4926]: I1122 10:42:12.978376 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vm4gs\" (UniqueName: \"kubernetes.io/projected/f0547d70-c2f4-4d26-ad73-9f65df8b1bc1-kube-api-access-vm4gs\") pod \"certified-operators-2vvkj\" (UID: \"f0547d70-c2f4-4d26-ad73-9f65df8b1bc1\") " pod="openshift-marketplace/certified-operators-2vvkj" Nov 22 10:42:13 crc kubenswrapper[4926]: I1122 10:42:13.053795 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-wlnj9\" (UID: \"d7487242-27ca-4f15-8d7f-6a7cf67e8992\") " pod="openshift-image-registry/image-registry-697d97f7c8-wlnj9" Nov 22 10:42:13 crc kubenswrapper[4926]: I1122 10:42:13.053854 4926 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/fbac19dc-113c-44e6-8744-445e62ea540d-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 22 10:42:13 crc kubenswrapper[4926]: I1122 10:42:13.053865 4926 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/fbac19dc-113c-44e6-8744-445e62ea540d-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Nov 22 10:42:13 crc kubenswrapper[4926]: I1122 10:42:13.053873 4926 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/fbac19dc-113c-44e6-8744-445e62ea540d-client-ca\") on node \"crc\" DevicePath \"\"" Nov 22 10:42:13 crc kubenswrapper[4926]: I1122 10:42:13.053904 4926 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/fbac19dc-113c-44e6-8744-445e62ea540d-config\") on node \"crc\" DevicePath \"\"" Nov 22 10:42:13 crc kubenswrapper[4926]: I1122 10:42:13.053916 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-j696v\" (UniqueName: \"kubernetes.io/projected/fbac19dc-113c-44e6-8744-445e62ea540d-kube-api-access-j696v\") on node \"crc\" DevicePath \"\"" Nov 22 10:42:13 crc kubenswrapper[4926]: E1122 10:42:13.054182 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-22 10:42:13.554168859 +0000 UTC m=+153.855774146 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-wlnj9" (UID: "d7487242-27ca-4f15-8d7f-6a7cf67e8992") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 10:42:13 crc kubenswrapper[4926]: I1122 10:42:13.064464 4926 generic.go:334] "Generic (PLEG): container finished" podID="f6332434-11ab-46ab-8379-c056d2c292b5" containerID="3bc4c73d77c40218b9b6c248cd942048e0a507829d42cc11887f49232b4f78e8" exitCode=0 Nov 22 10:42:13 crc kubenswrapper[4926]: I1122 10:42:13.064572 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29396790-vw7kn" event={"ID":"f6332434-11ab-46ab-8379-c056d2c292b5","Type":"ContainerDied","Data":"3bc4c73d77c40218b9b6c248cd942048e0a507829d42cc11887f49232b4f78e8"} Nov 22 10:42:13 crc kubenswrapper[4926]: I1122 10:42:13.068085 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-p5j7r" event={"ID":"5a5a2c07-33d5-4376-bee5-375341146a78","Type":"ContainerStarted","Data":"04f5e2640b6a5155c03969f75fcecfd84db20652b0a6882bf2fc88d7f95d34c6"} Nov 22 10:42:13 crc kubenswrapper[4926]: I1122 10:42:13.068123 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-p5j7r" event={"ID":"5a5a2c07-33d5-4376-bee5-375341146a78","Type":"ContainerStarted","Data":"c480e50e1b9a74520cc5ad53b49a1639c054b5fa53715577db27c1d56da61d4e"} Nov 22 10:42:13 crc kubenswrapper[4926]: I1122 10:42:13.070217 4926 generic.go:334] "Generic (PLEG): container finished" podID="fbac19dc-113c-44e6-8744-445e62ea540d" containerID="7d2a23f72bda1f20c8bbb73708aa680bd01b6779943dc1b6b4626f6187b74635" exitCode=0 Nov 22 10:42:13 crc kubenswrapper[4926]: I1122 10:42:13.070244 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-c56h5" event={"ID":"fbac19dc-113c-44e6-8744-445e62ea540d","Type":"ContainerDied","Data":"7d2a23f72bda1f20c8bbb73708aa680bd01b6779943dc1b6b4626f6187b74635"} Nov 22 10:42:13 crc kubenswrapper[4926]: I1122 10:42:13.070261 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-c56h5" event={"ID":"fbac19dc-113c-44e6-8744-445e62ea540d","Type":"ContainerDied","Data":"697f4a4ebd86df4136276139716a6ccac72daf864d0f7ba5fbfd73b98e382c1b"} Nov 22 10:42:13 crc kubenswrapper[4926]: I1122 10:42:13.070276 4926 scope.go:117] "RemoveContainer" containerID="7d2a23f72bda1f20c8bbb73708aa680bd01b6779943dc1b6b4626f6187b74635" Nov 22 10:42:13 crc kubenswrapper[4926]: I1122 10:42:13.070360 4926 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-c56h5" Nov 22 10:42:13 crc kubenswrapper[4926]: I1122 10:42:13.084588 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-c2r2n"] Nov 22 10:42:13 crc kubenswrapper[4926]: W1122 10:42:13.097336 4926 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb8674be0_e053_4d83_9a04_008800542315.slice/crio-905ff7d3e0dc94ea6a581890a54d9d0bbc11affe373212c13fe45eb7a602b85a WatchSource:0}: Error finding container 905ff7d3e0dc94ea6a581890a54d9d0bbc11affe373212c13fe45eb7a602b85a: Status 404 returned error can't find the container with id 905ff7d3e0dc94ea6a581890a54d9d0bbc11affe373212c13fe45eb7a602b85a Nov 22 10:42:13 crc kubenswrapper[4926]: I1122 10:42:13.107428 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="hostpath-provisioner/csi-hostpathplugin-p5j7r" podStartSLOduration=10.107408374 podStartE2EDuration="10.107408374s" podCreationTimestamp="2025-11-22 10:42:03 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 10:42:13.106984752 +0000 UTC m=+153.408590039" watchObservedRunningTime="2025-11-22 10:42:13.107408374 +0000 UTC m=+153.409013661" Nov 22 10:42:13 crc kubenswrapper[4926]: I1122 10:42:13.112835 4926 scope.go:117] "RemoveContainer" containerID="7d2a23f72bda1f20c8bbb73708aa680bd01b6779943dc1b6b4626f6187b74635" Nov 22 10:42:13 crc kubenswrapper[4926]: E1122 10:42:13.113564 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7d2a23f72bda1f20c8bbb73708aa680bd01b6779943dc1b6b4626f6187b74635\": container with ID starting with 7d2a23f72bda1f20c8bbb73708aa680bd01b6779943dc1b6b4626f6187b74635 not found: ID does not exist" containerID="7d2a23f72bda1f20c8bbb73708aa680bd01b6779943dc1b6b4626f6187b74635" Nov 22 10:42:13 crc kubenswrapper[4926]: I1122 10:42:13.113599 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7d2a23f72bda1f20c8bbb73708aa680bd01b6779943dc1b6b4626f6187b74635"} err="failed to get container status \"7d2a23f72bda1f20c8bbb73708aa680bd01b6779943dc1b6b4626f6187b74635\": rpc error: code = NotFound desc = could not find container \"7d2a23f72bda1f20c8bbb73708aa680bd01b6779943dc1b6b4626f6187b74635\": container with ID starting with 7d2a23f72bda1f20c8bbb73708aa680bd01b6779943dc1b6b4626f6187b74635 not found: ID does not exist" Nov 22 10:42:13 crc kubenswrapper[4926]: I1122 10:42:13.126656 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-4864b"] Nov 22 10:42:13 crc kubenswrapper[4926]: I1122 10:42:13.129305 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-c56h5"] Nov 22 10:42:13 crc kubenswrapper[4926]: I1122 10:42:13.132042 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-c56h5"] Nov 22 10:42:13 crc kubenswrapper[4926]: W1122 10:42:13.143625 4926 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podc34f3883_c9b2_41fe_9a17_127918e9ef88.slice/crio-a7fbe18a98d07156d38a13e01ae344c65a27aca1b4368360aa4ea9c12051d596 WatchSource:0}: Error finding container 
a7fbe18a98d07156d38a13e01ae344c65a27aca1b4368360aa4ea9c12051d596: Status 404 returned error can't find the container with id a7fbe18a98d07156d38a13e01ae344c65a27aca1b4368360aa4ea9c12051d596 Nov 22 10:42:13 crc kubenswrapper[4926]: I1122 10:42:13.156479 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 22 10:42:13 crc kubenswrapper[4926]: I1122 10:42:13.156826 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-2vvkj" Nov 22 10:42:13 crc kubenswrapper[4926]: E1122 10:42:13.157354 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 10:42:13.657339084 +0000 UTC m=+153.958944371 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 10:42:13 crc kubenswrapper[4926]: I1122 10:42:13.169633 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-s7kfj"] Nov 22 10:42:13 crc kubenswrapper[4926]: I1122 10:42:13.260746 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-wlnj9\" (UID: \"d7487242-27ca-4f15-8d7f-6a7cf67e8992\") " pod="openshift-image-registry/image-registry-697d97f7c8-wlnj9" Nov 22 10:42:13 crc kubenswrapper[4926]: E1122 10:42:13.261311 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-22 10:42:13.761291481 +0000 UTC m=+154.062896768 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-wlnj9" (UID: "d7487242-27ca-4f15-8d7f-6a7cf67e8992") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 10:42:13 crc kubenswrapper[4926]: I1122 10:42:13.361594 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 22 10:42:13 crc kubenswrapper[4926]: E1122 10:42:13.361766 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 10:42:13.861739928 +0000 UTC m=+154.163345215 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 10:42:13 crc kubenswrapper[4926]: I1122 10:42:13.363777 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-wlnj9\" (UID: \"d7487242-27ca-4f15-8d7f-6a7cf67e8992\") " pod="openshift-image-registry/image-registry-697d97f7c8-wlnj9" Nov 22 10:42:13 crc kubenswrapper[4926]: E1122 10:42:13.365819 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-22 10:42:13.865799874 +0000 UTC m=+154.167405171 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-wlnj9" (UID: "d7487242-27ca-4f15-8d7f-6a7cf67e8992") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 10:42:13 crc kubenswrapper[4926]: I1122 10:42:13.411815 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-2vvkj"] Nov 22 10:42:13 crc kubenswrapper[4926]: I1122 10:42:13.465543 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 22 10:42:13 crc kubenswrapper[4926]: E1122 10:42:13.466159 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 10:42:13.966141658 +0000 UTC m=+154.267746945 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 10:42:13 crc kubenswrapper[4926]: W1122 10:42:13.470660 4926 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf0547d70_c2f4_4d26_ad73_9f65df8b1bc1.slice/crio-66eda2fe6f592d117117b9ab5c56c7007315b5399036fb216863019e97e3db11 WatchSource:0}: Error finding container 66eda2fe6f592d117117b9ab5c56c7007315b5399036fb216863019e97e3db11: Status 404 returned error can't find the container with id 66eda2fe6f592d117117b9ab5c56c7007315b5399036fb216863019e97e3db11 Nov 22 10:42:13 crc kubenswrapper[4926]: I1122 10:42:13.478041 4926 patch_prober.go:28] interesting pod/router-default-5444994796-8k4sd container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 22 10:42:13 crc kubenswrapper[4926]: [-]has-synced failed: reason withheld Nov 22 10:42:13 crc kubenswrapper[4926]: [+]process-running ok Nov 22 10:42:13 crc kubenswrapper[4926]: healthz check failed Nov 22 10:42:13 crc kubenswrapper[4926]: I1122 10:42:13.478114 4926 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-8k4sd" podUID="12a1d8f7-8b05-42ae-bba2-13bb443eae1e" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 22 10:42:13 crc kubenswrapper[4926]: I1122 10:42:13.566764 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod 
\"image-registry-697d97f7c8-wlnj9\" (UID: \"d7487242-27ca-4f15-8d7f-6a7cf67e8992\") " pod="openshift-image-registry/image-registry-697d97f7c8-wlnj9" Nov 22 10:42:13 crc kubenswrapper[4926]: E1122 10:42:13.567255 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-22 10:42:14.067243014 +0000 UTC m=+154.368848301 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-wlnj9" (UID: "d7487242-27ca-4f15-8d7f-6a7cf67e8992") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 10:42:13 crc kubenswrapper[4926]: I1122 10:42:13.667511 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 22 10:42:13 crc kubenswrapper[4926]: E1122 10:42:13.667822 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 10:42:14.167781774 +0000 UTC m=+154.469387061 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 10:42:13 crc kubenswrapper[4926]: I1122 10:42:13.668210 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-wlnj9\" (UID: \"d7487242-27ca-4f15-8d7f-6a7cf67e8992\") " pod="openshift-image-registry/image-registry-697d97f7c8-wlnj9" Nov 22 10:42:13 crc kubenswrapper[4926]: E1122 10:42:13.668562 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-22 10:42:14.168546736 +0000 UTC m=+154.470152023 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-wlnj9" (UID: "d7487242-27ca-4f15-8d7f-6a7cf67e8992") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 10:42:13 crc kubenswrapper[4926]: I1122 10:42:13.742159 4926 reconciler.go:161] "OperationExecutor.RegisterPlugin started" plugin={"SocketPath":"/var/lib/kubelet/plugins_registry/kubevirt.io.hostpath-provisioner-reg.sock","Timestamp":"2025-11-22T10:42:12.806421593Z","Handler":null,"Name":""} Nov 22 10:42:13 crc kubenswrapper[4926]: I1122 10:42:13.767991 4926 csi_plugin.go:100] kubernetes.io/csi: Trying to validate a new CSI Driver with name: kubevirt.io.hostpath-provisioner endpoint: /var/lib/kubelet/plugins/csi-hostpath/csi.sock versions: 1.0.0 Nov 22 10:42:13 crc kubenswrapper[4926]: I1122 10:42:13.768046 4926 csi_plugin.go:113] kubernetes.io/csi: Register new plugin with name: kubevirt.io.hostpath-provisioner at endpoint: /var/lib/kubelet/plugins/csi-hostpath/csi.sock Nov 22 10:42:13 crc kubenswrapper[4926]: I1122 10:42:13.768951 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 22 10:42:13 crc kubenswrapper[4926]: I1122 10:42:13.773956 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (OuterVolumeSpecName: "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8". PluginName "kubernetes.io/csi", VolumeGidValue "" Nov 22 10:42:13 crc kubenswrapper[4926]: I1122 10:42:13.870196 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-wlnj9\" (UID: \"d7487242-27ca-4f15-8d7f-6a7cf67e8992\") " pod="openshift-image-registry/image-registry-697d97f7c8-wlnj9" Nov 22 10:42:13 crc kubenswrapper[4926]: I1122 10:42:13.872842 4926 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... 
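Note: the records above capture a restart race, not a persistent fault. Every UnmountVolume.TearDown and MountVolume.MountDevice attempt against pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 fails with "driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers" and is re-queued with a 500ms backoff (durationBeforeRetry), until the driver re-registers over its plugins_registry socket at 10:42:13.768; the very next TearDown and MountDevice attempts then succeed, as the following records show. A minimal sketch of how one might measure that window from a capture like this one (illustrative only, not part of the log; the script, its default kubelet.log file name, and the field positions derived from the journalctl-style "Nov 22 10:42:13 crc ..." prefix are all assumptions):

#!/usr/bin/env python3
# csi_race_window.py -- illustrative helper, not part of the captured log.
# Reports how long CSI volume operations failed because the
# kubevirt.io.hostpath-provisioner driver had not yet re-registered.
import sys

DRIVER = "kubevirt.io.hostpath-provisioner"
NOT_REGISTERED = f"driver name {DRIVER} not found in the list of registered CSI drivers"
REGISTERED = f"Register new plugin with name: {DRIVER}"

def scan(path):
    first_fail = last_fail = registered_at = None
    failures = 0
    with open(path, encoding="utf-8", errors="replace") as log:
        for line in log:
            # Keep the syslog timestamp fields, e.g. "Nov 22 10:42:13".
            ts = " ".join(line.split()[:3])
            if NOT_REGISTERED in line:
                failures += 1
                first_fail = first_fail or ts
                last_fail = ts
            elif REGISTERED in line and registered_at is None:
                registered_at = ts
    print(f"{failures} failed mount/unmount attempts between {first_fail} and {last_fail}")
    print(f"driver registered at: {registered_at}")

if __name__ == "__main__":
    scan(sys.argv[1] if len(sys.argv) > 1 else "kubelet.log")

Run as, for example, "python3 csi_race_window.py kubelet.log"; on this capture it would count the repeated TearDown/MountDevice failures at 10:42:13 and report the registration record at 10:42:13 that ends them.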
Nov 22 10:42:13 crc kubenswrapper[4926]: I1122 10:42:13.872899 4926 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-wlnj9\" (UID: \"d7487242-27ca-4f15-8d7f-6a7cf67e8992\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/globalmount\"" pod="openshift-image-registry/image-registry-697d97f7c8-wlnj9"
Nov 22 10:42:13 crc kubenswrapper[4926]: I1122 10:42:13.901024 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-wlnj9\" (UID: \"d7487242-27ca-4f15-8d7f-6a7cf67e8992\") " pod="openshift-image-registry/image-registry-697d97f7c8-wlnj9"
Nov 22 10:42:13 crc kubenswrapper[4926]: I1122 10:42:13.952669 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-wlnj9"
Nov 22 10:42:14 crc kubenswrapper[4926]: I1122 10:42:14.078287 4926 generic.go:334] "Generic (PLEG): container finished" podID="c34f3883-c9b2-41fe-9a17-127918e9ef88" containerID="28a382c1dd49adeffb8b19110b6b6dde0b6cc27eb6eee629873a16e67c9f43e4" exitCode=0
Nov 22 10:42:14 crc kubenswrapper[4926]: I1122 10:42:14.078362 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-4864b" event={"ID":"c34f3883-c9b2-41fe-9a17-127918e9ef88","Type":"ContainerDied","Data":"28a382c1dd49adeffb8b19110b6b6dde0b6cc27eb6eee629873a16e67c9f43e4"}
Nov 22 10:42:14 crc kubenswrapper[4926]: I1122 10:42:14.078392 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-4864b" event={"ID":"c34f3883-c9b2-41fe-9a17-127918e9ef88","Type":"ContainerStarted","Data":"a7fbe18a98d07156d38a13e01ae344c65a27aca1b4368360aa4ea9c12051d596"}
Nov 22 10:42:14 crc kubenswrapper[4926]: I1122 10:42:14.080869 4926 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider
Nov 22 10:42:14 crc kubenswrapper[4926]: I1122 10:42:14.081663 4926 generic.go:334] "Generic (PLEG): container finished" podID="4ada1ec1-1f41-40d3-a36d-3de32bb19c9c" containerID="10b1cc662a876506bc6144f9bd713983225c8537e26d8c8d52af72e95c22ba6f" exitCode=0
Nov 22 10:42:14 crc kubenswrapper[4926]: I1122 10:42:14.081845 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-s7kfj" event={"ID":"4ada1ec1-1f41-40d3-a36d-3de32bb19c9c","Type":"ContainerDied","Data":"10b1cc662a876506bc6144f9bd713983225c8537e26d8c8d52af72e95c22ba6f"}
Nov 22 10:42:14 crc kubenswrapper[4926]: I1122 10:42:14.081915 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-s7kfj" event={"ID":"4ada1ec1-1f41-40d3-a36d-3de32bb19c9c","Type":"ContainerStarted","Data":"ff671ab63b3078d6f7c97c946445b21ace2b55f9420a8b6ce9384161f3842bd6"}
Nov 22 10:42:14 crc kubenswrapper[4926]: I1122 10:42:14.086174 4926 generic.go:334] "Generic (PLEG): container finished" podID="f0547d70-c2f4-4d26-ad73-9f65df8b1bc1" containerID="3d78e1e93471eebfb2e0e2a852fc44f821e4325c33d30acbe56dbed05f33ccf9" exitCode=0
Nov 22 10:42:14 crc kubenswrapper[4926]: I1122 10:42:14.086262 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-2vvkj" event={"ID":"f0547d70-c2f4-4d26-ad73-9f65df8b1bc1","Type":"ContainerDied","Data":"3d78e1e93471eebfb2e0e2a852fc44f821e4325c33d30acbe56dbed05f33ccf9"}
Nov 22 10:42:14 crc kubenswrapper[4926]: I1122 10:42:14.086294 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-2vvkj" event={"ID":"f0547d70-c2f4-4d26-ad73-9f65df8b1bc1","Type":"ContainerStarted","Data":"66eda2fe6f592d117117b9ab5c56c7007315b5399036fb216863019e97e3db11"}
Nov 22 10:42:14 crc kubenswrapper[4926]: I1122 10:42:14.113958 4926 generic.go:334] "Generic (PLEG): container finished" podID="b8674be0-e053-4d83-9a04-008800542315" containerID="ac229eb4924c7030ab410fb7d0377b026c3342b5828e8318068b90be7bff47cf" exitCode=0
Nov 22 10:42:14 crc kubenswrapper[4926]: I1122 10:42:14.114424 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-c2r2n" event={"ID":"b8674be0-e053-4d83-9a04-008800542315","Type":"ContainerDied","Data":"ac229eb4924c7030ab410fb7d0377b026c3342b5828e8318068b90be7bff47cf"}
Nov 22 10:42:14 crc kubenswrapper[4926]: I1122 10:42:14.114454 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-c2r2n" event={"ID":"b8674be0-e053-4d83-9a04-008800542315","Type":"ContainerStarted","Data":"905ff7d3e0dc94ea6a581890a54d9d0bbc11affe373212c13fe45eb7a602b85a"}
Nov 22 10:42:14 crc kubenswrapper[4926]: I1122 10:42:14.173580 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-wlnj9"]
Nov 22 10:42:14 crc kubenswrapper[4926]: I1122 10:42:14.269791 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-h2qnt"]
Nov 22 10:42:14 crc kubenswrapper[4926]: E1122 10:42:14.270069 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fbac19dc-113c-44e6-8744-445e62ea540d" containerName="controller-manager"
Nov 22 10:42:14 crc kubenswrapper[4926]: I1122 10:42:14.270083 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="fbac19dc-113c-44e6-8744-445e62ea540d" containerName="controller-manager"
Nov 22 10:42:14 crc kubenswrapper[4926]: I1122 10:42:14.270215 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="fbac19dc-113c-44e6-8744-445e62ea540d" containerName="controller-manager"
Nov 22 10:42:14 crc kubenswrapper[4926]: I1122 10:42:14.270640 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-h2qnt"
Nov 22 10:42:14 crc kubenswrapper[4926]: I1122 10:42:14.273504 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config"
Nov 22 10:42:14 crc kubenswrapper[4926]: I1122 10:42:14.273638 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt"
Nov 22 10:42:14 crc kubenswrapper[4926]: I1122 10:42:14.273823 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt"
Nov 22 10:42:14 crc kubenswrapper[4926]: I1122 10:42:14.274093 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca"
Nov 22 10:42:14 crc kubenswrapper[4926]: I1122 10:42:14.278444 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c"
Nov 22 10:42:14 crc kubenswrapper[4926]: I1122 10:42:14.280132 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert"
Nov 22 10:42:14 crc kubenswrapper[4926]: I1122 10:42:14.283532 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-h2qnt"]
Nov 22 10:42:14 crc kubenswrapper[4926]: I1122 10:42:14.286929 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca"
Nov 22 10:42:14 crc kubenswrapper[4926]: I1122 10:42:14.301811 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-controller-manager/revision-pruner-9-crc"]
Nov 22 10:42:14 crc kubenswrapper[4926]: I1122 10:42:14.302871 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc"
Nov 22 10:42:14 crc kubenswrapper[4926]: I1122 10:42:14.304356 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager"/"kube-root-ca.crt"
Nov 22 10:42:14 crc kubenswrapper[4926]: I1122 10:42:14.305475 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager"/"installer-sa-dockercfg-kjl2n"
Nov 22 10:42:14 crc kubenswrapper[4926]: I1122 10:42:14.310230 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager/revision-pruner-9-crc"]
Nov 22 10:42:14 crc kubenswrapper[4926]: I1122 10:42:14.337074 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29396790-vw7kn"
Nov 22 10:42:14 crc kubenswrapper[4926]: I1122 10:42:14.374494 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/f6332434-11ab-46ab-8379-c056d2c292b5-config-volume\") pod \"f6332434-11ab-46ab-8379-c056d2c292b5\" (UID: \"f6332434-11ab-46ab-8379-c056d2c292b5\") "
Nov 22 10:42:14 crc kubenswrapper[4926]: I1122 10:42:14.374570 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d8hwz\" (UniqueName: \"kubernetes.io/projected/f6332434-11ab-46ab-8379-c056d2c292b5-kube-api-access-d8hwz\") pod \"f6332434-11ab-46ab-8379-c056d2c292b5\" (UID: \"f6332434-11ab-46ab-8379-c056d2c292b5\") "
Nov 22 10:42:14 crc kubenswrapper[4926]: I1122 10:42:14.374596 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/f6332434-11ab-46ab-8379-c056d2c292b5-secret-volume\") pod \"f6332434-11ab-46ab-8379-c056d2c292b5\" (UID: \"f6332434-11ab-46ab-8379-c056d2c292b5\") "
Nov 22 10:42:14 crc kubenswrapper[4926]: I1122 10:42:14.374844 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/404e486e-98a0-4803-bb70-2db849a922ea-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"404e486e-98a0-4803-bb70-2db849a922ea\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc"
Nov 22 10:42:14 crc kubenswrapper[4926]: I1122 10:42:14.374898 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/404e486e-98a0-4803-bb70-2db849a922ea-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"404e486e-98a0-4803-bb70-2db849a922ea\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc"
Nov 22 10:42:14 crc kubenswrapper[4926]: I1122 10:42:14.374927 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/aadd3c70-0dfa-42cb-879d-026a0ed055ba-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-h2qnt\" (UID: \"aadd3c70-0dfa-42cb-879d-026a0ed055ba\") " pod="openshift-controller-manager/controller-manager-879f6c89f-h2qnt"
Nov 22 10:42:14 crc kubenswrapper[4926]: I1122 10:42:14.374960 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/aadd3c70-0dfa-42cb-879d-026a0ed055ba-client-ca\") pod \"controller-manager-879f6c89f-h2qnt\" (UID: \"aadd3c70-0dfa-42cb-879d-026a0ed055ba\") " pod="openshift-controller-manager/controller-manager-879f6c89f-h2qnt"
Nov 22 10:42:14 crc kubenswrapper[4926]: I1122 10:42:14.374991 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/aadd3c70-0dfa-42cb-879d-026a0ed055ba-serving-cert\") pod \"controller-manager-879f6c89f-h2qnt\" (UID: \"aadd3c70-0dfa-42cb-879d-026a0ed055ba\") " pod="openshift-controller-manager/controller-manager-879f6c89f-h2qnt"
Nov 22 10:42:14 crc kubenswrapper[4926]: I1122 10:42:14.375015 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/aadd3c70-0dfa-42cb-879d-026a0ed055ba-config\") pod \"controller-manager-879f6c89f-h2qnt\" (UID: \"aadd3c70-0dfa-42cb-879d-026a0ed055ba\") " pod="openshift-controller-manager/controller-manager-879f6c89f-h2qnt"
Nov 22 10:42:14 crc kubenswrapper[4926]: I1122 10:42:14.375159 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-smgdm\" (UniqueName: \"kubernetes.io/projected/aadd3c70-0dfa-42cb-879d-026a0ed055ba-kube-api-access-smgdm\") pod \"controller-manager-879f6c89f-h2qnt\" (UID: \"aadd3c70-0dfa-42cb-879d-026a0ed055ba\") " pod="openshift-controller-manager/controller-manager-879f6c89f-h2qnt"
Nov 22 10:42:14 crc kubenswrapper[4926]: I1122 10:42:14.375619 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f6332434-11ab-46ab-8379-c056d2c292b5-config-volume" (OuterVolumeSpecName: "config-volume") pod "f6332434-11ab-46ab-8379-c056d2c292b5" (UID: "f6332434-11ab-46ab-8379-c056d2c292b5"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 22 10:42:14 crc kubenswrapper[4926]: I1122 10:42:14.379815 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f6332434-11ab-46ab-8379-c056d2c292b5-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "f6332434-11ab-46ab-8379-c056d2c292b5" (UID: "f6332434-11ab-46ab-8379-c056d2c292b5"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 22 10:42:14 crc kubenswrapper[4926]: I1122 10:42:14.379946 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f6332434-11ab-46ab-8379-c056d2c292b5-kube-api-access-d8hwz" (OuterVolumeSpecName: "kube-api-access-d8hwz") pod "f6332434-11ab-46ab-8379-c056d2c292b5" (UID: "f6332434-11ab-46ab-8379-c056d2c292b5"). InnerVolumeSpecName "kube-api-access-d8hwz". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 22 10:42:14 crc kubenswrapper[4926]: I1122 10:42:14.403972 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-x6q97"]
Nov 22 10:42:14 crc kubenswrapper[4926]: E1122 10:42:14.404215 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f6332434-11ab-46ab-8379-c056d2c292b5" containerName="collect-profiles"
Nov 22 10:42:14 crc kubenswrapper[4926]: I1122 10:42:14.404232 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="f6332434-11ab-46ab-8379-c056d2c292b5" containerName="collect-profiles"
Nov 22 10:42:14 crc kubenswrapper[4926]: I1122 10:42:14.404331 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="f6332434-11ab-46ab-8379-c056d2c292b5" containerName="collect-profiles"
Nov 22 10:42:14 crc kubenswrapper[4926]: I1122 10:42:14.405446 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-x6q97"
Nov 22 10:42:14 crc kubenswrapper[4926]: I1122 10:42:14.407297 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-marketplace-dockercfg-x2ctb"
Nov 22 10:42:14 crc kubenswrapper[4926]: I1122 10:42:14.415235 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-x6q97"]
Nov 22 10:42:14 crc kubenswrapper[4926]: I1122 10:42:14.475506 4926 patch_prober.go:28] interesting pod/router-default-5444994796-8k4sd container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Nov 22 10:42:14 crc kubenswrapper[4926]: [-]has-synced failed: reason withheld
Nov 22 10:42:14 crc kubenswrapper[4926]: [+]process-running ok
Nov 22 10:42:14 crc kubenswrapper[4926]: healthz check failed
Nov 22 10:42:14 crc kubenswrapper[4926]: I1122 10:42:14.475580 4926 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-8k4sd" podUID="12a1d8f7-8b05-42ae-bba2-13bb443eae1e" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Nov 22 10:42:14 crc kubenswrapper[4926]: I1122 10:42:14.476139 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8ea24004-1068-4fad-a694-c92251db240d-utilities\") pod \"redhat-marketplace-x6q97\" (UID: \"8ea24004-1068-4fad-a694-c92251db240d\") " pod="openshift-marketplace/redhat-marketplace-x6q97"
Nov 22 10:42:14 crc kubenswrapper[4926]: I1122 10:42:14.476333 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-smgdm\" (UniqueName: \"kubernetes.io/projected/aadd3c70-0dfa-42cb-879d-026a0ed055ba-kube-api-access-smgdm\") pod \"controller-manager-879f6c89f-h2qnt\" (UID: \"aadd3c70-0dfa-42cb-879d-026a0ed055ba\") " pod="openshift-controller-manager/controller-manager-879f6c89f-h2qnt"
Nov 22 10:42:14 crc kubenswrapper[4926]: I1122 10:42:14.476453 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/404e486e-98a0-4803-bb70-2db849a922ea-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"404e486e-98a0-4803-bb70-2db849a922ea\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc"
Nov 22 10:42:14 crc kubenswrapper[4926]: I1122 10:42:14.476629 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/404e486e-98a0-4803-bb70-2db849a922ea-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"404e486e-98a0-4803-bb70-2db849a922ea\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc"
Nov 22 10:42:14 crc kubenswrapper[4926]: I1122 10:42:14.476746 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/aadd3c70-0dfa-42cb-879d-026a0ed055ba-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-h2qnt\" (UID: \"aadd3c70-0dfa-42cb-879d-026a0ed055ba\") " pod="openshift-controller-manager/controller-manager-879f6c89f-h2qnt"
Nov 22 10:42:14 crc kubenswrapper[4926]: I1122 10:42:14.476857 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8ea24004-1068-4fad-a694-c92251db240d-catalog-content\") pod \"redhat-marketplace-x6q97\" (UID: \"8ea24004-1068-4fad-a694-c92251db240d\") " pod="openshift-marketplace/redhat-marketplace-x6q97"
Nov 22 10:42:14 crc kubenswrapper[4926]: I1122 10:42:14.476987 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/aadd3c70-0dfa-42cb-879d-026a0ed055ba-client-ca\") pod \"controller-manager-879f6c89f-h2qnt\" (UID: \"aadd3c70-0dfa-42cb-879d-026a0ed055ba\") " pod="openshift-controller-manager/controller-manager-879f6c89f-h2qnt"
Nov 22 10:42:14 crc kubenswrapper[4926]: I1122 10:42:14.477111 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dl7st\" (UniqueName: \"kubernetes.io/projected/8ea24004-1068-4fad-a694-c92251db240d-kube-api-access-dl7st\") pod \"redhat-marketplace-x6q97\" (UID: \"8ea24004-1068-4fad-a694-c92251db240d\") " pod="openshift-marketplace/redhat-marketplace-x6q97"
Nov 22 10:42:14 crc kubenswrapper[4926]: I1122 10:42:14.477271 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/aadd3c70-0dfa-42cb-879d-026a0ed055ba-serving-cert\") pod \"controller-manager-879f6c89f-h2qnt\" (UID: \"aadd3c70-0dfa-42cb-879d-026a0ed055ba\") " pod="openshift-controller-manager/controller-manager-879f6c89f-h2qnt"
Nov 22 10:42:14 crc kubenswrapper[4926]: I1122 10:42:14.477370 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/aadd3c70-0dfa-42cb-879d-026a0ed055ba-config\") pod \"controller-manager-879f6c89f-h2qnt\" (UID: \"aadd3c70-0dfa-42cb-879d-026a0ed055ba\") " pod="openshift-controller-manager/controller-manager-879f6c89f-h2qnt"
Nov 22 10:42:14 crc kubenswrapper[4926]: I1122 10:42:14.477495 4926 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/f6332434-11ab-46ab-8379-c056d2c292b5-config-volume\") on node \"crc\" DevicePath \"\""
Nov 22 10:42:14 crc kubenswrapper[4926]: I1122 10:42:14.477586 4926 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/f6332434-11ab-46ab-8379-c056d2c292b5-secret-volume\") on node \"crc\" DevicePath \"\""
Nov 22 10:42:14 crc kubenswrapper[4926]: I1122 10:42:14.477680 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d8hwz\" (UniqueName: \"kubernetes.io/projected/f6332434-11ab-46ab-8379-c056d2c292b5-kube-api-access-d8hwz\") on node \"crc\" DevicePath \"\""
Nov 22 10:42:14 crc kubenswrapper[4926]: I1122 10:42:14.476588 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/404e486e-98a0-4803-bb70-2db849a922ea-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"404e486e-98a0-4803-bb70-2db849a922ea\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc"
Nov 22 10:42:14 crc kubenswrapper[4926]: I1122 10:42:14.478957 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/aadd3c70-0dfa-42cb-879d-026a0ed055ba-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-h2qnt\" (UID: \"aadd3c70-0dfa-42cb-879d-026a0ed055ba\") " pod="openshift-controller-manager/controller-manager-879f6c89f-h2qnt"
Nov 22 10:42:14 crc kubenswrapper[4926]: I1122 10:42:14.479566 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/aadd3c70-0dfa-42cb-879d-026a0ed055ba-config\") pod \"controller-manager-879f6c89f-h2qnt\" (UID: \"aadd3c70-0dfa-42cb-879d-026a0ed055ba\") " pod="openshift-controller-manager/controller-manager-879f6c89f-h2qnt"
Nov 22 10:42:14 crc kubenswrapper[4926]: I1122 10:42:14.480069 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/aadd3c70-0dfa-42cb-879d-026a0ed055ba-client-ca\") pod \"controller-manager-879f6c89f-h2qnt\" (UID: \"aadd3c70-0dfa-42cb-879d-026a0ed055ba\") " pod="openshift-controller-manager/controller-manager-879f6c89f-h2qnt"
Nov 22 10:42:14 crc kubenswrapper[4926]: I1122 10:42:14.482331 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/aadd3c70-0dfa-42cb-879d-026a0ed055ba-serving-cert\") pod \"controller-manager-879f6c89f-h2qnt\" (UID: \"aadd3c70-0dfa-42cb-879d-026a0ed055ba\") " pod="openshift-controller-manager/controller-manager-879f6c89f-h2qnt"
Nov 22 10:42:14 crc kubenswrapper[4926]: I1122 10:42:14.491524 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/404e486e-98a0-4803-bb70-2db849a922ea-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"404e486e-98a0-4803-bb70-2db849a922ea\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc"
Nov 22 10:42:14 crc kubenswrapper[4926]: I1122 10:42:14.491873 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-smgdm\" (UniqueName: \"kubernetes.io/projected/aadd3c70-0dfa-42cb-879d-026a0ed055ba-kube-api-access-smgdm\") pod \"controller-manager-879f6c89f-h2qnt\" (UID: \"aadd3c70-0dfa-42cb-879d-026a0ed055ba\") " pod="openshift-controller-manager/controller-manager-879f6c89f-h2qnt"
Nov 22 10:42:14 crc kubenswrapper[4926]: I1122 10:42:14.578861 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8ea24004-1068-4fad-a694-c92251db240d-catalog-content\") pod \"redhat-marketplace-x6q97\" (UID: \"8ea24004-1068-4fad-a694-c92251db240d\") " pod="openshift-marketplace/redhat-marketplace-x6q97"
Nov 22 10:42:14 crc kubenswrapper[4926]: I1122 10:42:14.578967 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dl7st\" (UniqueName: \"kubernetes.io/projected/8ea24004-1068-4fad-a694-c92251db240d-kube-api-access-dl7st\") pod \"redhat-marketplace-x6q97\" (UID: \"8ea24004-1068-4fad-a694-c92251db240d\") " pod="openshift-marketplace/redhat-marketplace-x6q97"
Nov 22 10:42:14 crc kubenswrapper[4926]: I1122 10:42:14.579007 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8ea24004-1068-4fad-a694-c92251db240d-utilities\") pod \"redhat-marketplace-x6q97\" (UID: \"8ea24004-1068-4fad-a694-c92251db240d\") " pod="openshift-marketplace/redhat-marketplace-x6q97"
Nov 22 10:42:14 crc kubenswrapper[4926]: I1122 10:42:14.579404 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8ea24004-1068-4fad-a694-c92251db240d-catalog-content\") pod \"redhat-marketplace-x6q97\" (UID: \"8ea24004-1068-4fad-a694-c92251db240d\") " pod="openshift-marketplace/redhat-marketplace-x6q97"
Nov 22 10:42:14 crc kubenswrapper[4926]: I1122 10:42:14.579455 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8ea24004-1068-4fad-a694-c92251db240d-utilities\") pod \"redhat-marketplace-x6q97\" (UID: \"8ea24004-1068-4fad-a694-c92251db240d\") " pod="openshift-marketplace/redhat-marketplace-x6q97"
Nov 22 10:42:14 crc kubenswrapper[4926]: I1122 10:42:14.591279 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8f668bae-612b-4b75-9490-919e737c6a3b" path="/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes"
Nov 22 10:42:14 crc kubenswrapper[4926]: I1122 10:42:14.592739 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fbac19dc-113c-44e6-8744-445e62ea540d" path="/var/lib/kubelet/pods/fbac19dc-113c-44e6-8744-445e62ea540d/volumes"
Nov 22 10:42:14 crc kubenswrapper[4926]: I1122 10:42:14.594948 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dl7st\" (UniqueName: \"kubernetes.io/projected/8ea24004-1068-4fad-a694-c92251db240d-kube-api-access-dl7st\") pod \"redhat-marketplace-x6q97\" (UID: \"8ea24004-1068-4fad-a694-c92251db240d\") " pod="openshift-marketplace/redhat-marketplace-x6q97"
Nov 22 10:42:14 crc kubenswrapper[4926]: I1122 10:42:14.604040 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-h2qnt"
Nov 22 10:42:14 crc kubenswrapper[4926]: I1122 10:42:14.667796 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc"
Nov 22 10:42:14 crc kubenswrapper[4926]: I1122 10:42:14.725331 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-x6q97"
Nov 22 10:42:14 crc kubenswrapper[4926]: I1122 10:42:14.816496 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-mcdg4"]
Nov 22 10:42:14 crc kubenswrapper[4926]: I1122 10:42:14.820123 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-mcdg4"
Nov 22 10:42:14 crc kubenswrapper[4926]: I1122 10:42:14.835345 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-mcdg4"]
Nov 22 10:42:14 crc kubenswrapper[4926]: I1122 10:42:14.842724 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-h2qnt"]
Nov 22 10:42:14 crc kubenswrapper[4926]: I1122 10:42:14.891843 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9f557\" (UniqueName: \"kubernetes.io/projected/72e5abe7-12f0-4180-9d5c-9d55fe800bc5-kube-api-access-9f557\") pod \"redhat-marketplace-mcdg4\" (UID: \"72e5abe7-12f0-4180-9d5c-9d55fe800bc5\") " pod="openshift-marketplace/redhat-marketplace-mcdg4"
Nov 22 10:42:14 crc kubenswrapper[4926]: I1122 10:42:14.891926 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/72e5abe7-12f0-4180-9d5c-9d55fe800bc5-catalog-content\") pod \"redhat-marketplace-mcdg4\" (UID: \"72e5abe7-12f0-4180-9d5c-9d55fe800bc5\") " pod="openshift-marketplace/redhat-marketplace-mcdg4"
Nov 22 10:42:14 crc kubenswrapper[4926]: I1122 10:42:14.891954 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/72e5abe7-12f0-4180-9d5c-9d55fe800bc5-utilities\") pod \"redhat-marketplace-mcdg4\" (UID: \"72e5abe7-12f0-4180-9d5c-9d55fe800bc5\") " pod="openshift-marketplace/redhat-marketplace-mcdg4"
Nov 22 10:42:14 crc kubenswrapper[4926]: I1122 10:42:14.993541 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/72e5abe7-12f0-4180-9d5c-9d55fe800bc5-catalog-content\") pod \"redhat-marketplace-mcdg4\" (UID: \"72e5abe7-12f0-4180-9d5c-9d55fe800bc5\") " pod="openshift-marketplace/redhat-marketplace-mcdg4"
Nov 22 10:42:14 crc kubenswrapper[4926]: I1122 10:42:14.993592 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/72e5abe7-12f0-4180-9d5c-9d55fe800bc5-utilities\") pod \"redhat-marketplace-mcdg4\" (UID: \"72e5abe7-12f0-4180-9d5c-9d55fe800bc5\") " pod="openshift-marketplace/redhat-marketplace-mcdg4"
Nov 22 10:42:14 crc kubenswrapper[4926]: I1122 10:42:14.993650 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9f557\" (UniqueName: \"kubernetes.io/projected/72e5abe7-12f0-4180-9d5c-9d55fe800bc5-kube-api-access-9f557\") pod \"redhat-marketplace-mcdg4\" (UID: \"72e5abe7-12f0-4180-9d5c-9d55fe800bc5\") " pod="openshift-marketplace/redhat-marketplace-mcdg4"
Nov 22 10:42:14 crc kubenswrapper[4926]: I1122 10:42:14.994797 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/72e5abe7-12f0-4180-9d5c-9d55fe800bc5-catalog-content\") pod \"redhat-marketplace-mcdg4\" (UID: \"72e5abe7-12f0-4180-9d5c-9d55fe800bc5\") " pod="openshift-marketplace/redhat-marketplace-mcdg4"
Nov 22 10:42:14 crc kubenswrapper[4926]: I1122 10:42:14.995053 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/72e5abe7-12f0-4180-9d5c-9d55fe800bc5-utilities\") pod \"redhat-marketplace-mcdg4\" (UID: \"72e5abe7-12f0-4180-9d5c-9d55fe800bc5\") " pod="openshift-marketplace/redhat-marketplace-mcdg4"
Nov 22 10:42:15 crc kubenswrapper[4926]: I1122 10:42:15.007433 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-x6q97"]
Nov 22 10:42:15 crc kubenswrapper[4926]: I1122 10:42:15.012524 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9f557\" (UniqueName: \"kubernetes.io/projected/72e5abe7-12f0-4180-9d5c-9d55fe800bc5-kube-api-access-9f557\") pod \"redhat-marketplace-mcdg4\" (UID: \"72e5abe7-12f0-4180-9d5c-9d55fe800bc5\") " pod="openshift-marketplace/redhat-marketplace-mcdg4"
Nov 22 10:42:15 crc kubenswrapper[4926]: W1122 10:42:15.016918 4926 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod8ea24004_1068_4fad_a694_c92251db240d.slice/crio-ca376f0daf1c3f8fd8be9865784c76d02be809031927638b3cb37ad948686701 WatchSource:0}: Error finding container ca376f0daf1c3f8fd8be9865784c76d02be809031927638b3cb37ad948686701: Status 404 returned error can't find the container with id ca376f0daf1c3f8fd8be9865784c76d02be809031927638b3cb37ad948686701
Nov 22 10:42:15 crc kubenswrapper[4926]: I1122 10:42:15.143147 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-mcdg4"
Nov 22 10:42:15 crc kubenswrapper[4926]: I1122 10:42:15.145802 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29396790-vw7kn"
Nov 22 10:42:15 crc kubenswrapper[4926]: I1122 10:42:15.146370 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29396790-vw7kn" event={"ID":"f6332434-11ab-46ab-8379-c056d2c292b5","Type":"ContainerDied","Data":"9c8d58efb6dcccfd45042b4a981bcbad9c0ed86f357853987637f580841bc003"}
Nov 22 10:42:15 crc kubenswrapper[4926]: I1122 10:42:15.146414 4926 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="9c8d58efb6dcccfd45042b4a981bcbad9c0ed86f357853987637f580841bc003"
Nov 22 10:42:15 crc kubenswrapper[4926]: I1122 10:42:15.152974 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-wlnj9" event={"ID":"d7487242-27ca-4f15-8d7f-6a7cf67e8992","Type":"ContainerStarted","Data":"cf25e80f29d26f1063b60d0aedcdead40278024d15acda1a5cec8783ab409226"}
Nov 22 10:42:15 crc kubenswrapper[4926]: I1122 10:42:15.153017 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-wlnj9" event={"ID":"d7487242-27ca-4f15-8d7f-6a7cf67e8992","Type":"ContainerStarted","Data":"99b73edd8a23876dffd0852a6ef6fafaa9b7d377a55d80271beab1e22092f62a"}
Nov 22 10:42:15 crc kubenswrapper[4926]: I1122 10:42:15.153061 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-image-registry/image-registry-697d97f7c8-wlnj9"
Nov 22 10:42:15 crc kubenswrapper[4926]: I1122 10:42:15.154111 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-x6q97" event={"ID":"8ea24004-1068-4fad-a694-c92251db240d","Type":"ContainerStarted","Data":"ca376f0daf1c3f8fd8be9865784c76d02be809031927638b3cb37ad948686701"}
Nov 22 10:42:15 crc kubenswrapper[4926]: I1122 10:42:15.156719 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-h2qnt" event={"ID":"aadd3c70-0dfa-42cb-879d-026a0ed055ba","Type":"ContainerStarted","Data":"3aa59bd6f40427a36bfdbe2fbf567da7b97b34306e0ceb3d7dd972d2c071941a"}
Nov 22 10:42:15 crc kubenswrapper[4926]: I1122 10:42:15.156754 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-h2qnt" event={"ID":"aadd3c70-0dfa-42cb-879d-026a0ed055ba","Type":"ContainerStarted","Data":"fb2cca53a8dbf1a269fbc1f2306cfebb7f39b7fcaa396a524113c170a45d6caa"}
Nov 22 10:42:15 crc kubenswrapper[4926]: I1122 10:42:15.173639 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager/revision-pruner-9-crc"]
Nov 22 10:42:15 crc kubenswrapper[4926]: I1122 10:42:15.176389 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/image-registry-697d97f7c8-wlnj9" podStartSLOduration=135.176372063 podStartE2EDuration="2m15.176372063s" podCreationTimestamp="2025-11-22 10:40:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 10:42:15.173998505 +0000 UTC m=+155.475603792" watchObservedRunningTime="2025-11-22 10:42:15.176372063 +0000 UTC m=+155.477977350"
Nov 22 10:42:15 crc kubenswrapper[4926]: W1122 10:42:15.190466 4926 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-pod404e486e_98a0_4803_bb70_2db849a922ea.slice/crio-778e7fe59cf71c463fd495aec6e85bd183ba53851f4e3b98bf15f8641fac2464 WatchSource:0}: Error finding container 778e7fe59cf71c463fd495aec6e85bd183ba53851f4e3b98bf15f8641fac2464: Status 404 returned error can't find the container with id 778e7fe59cf71c463fd495aec6e85bd183ba53851f4e3b98bf15f8641fac2464
Nov 22 10:42:15 crc kubenswrapper[4926]: I1122 10:42:15.380396 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-console/console-f9d7485db-nsj2w"
Nov 22 10:42:15 crc kubenswrapper[4926]: I1122 10:42:15.380642 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/console-f9d7485db-nsj2w"
Nov 22 10:42:15 crc kubenswrapper[4926]: I1122 10:42:15.382272 4926 patch_prober.go:28] interesting pod/console-f9d7485db-nsj2w container/console namespace/openshift-console: Startup probe status=failure output="Get \"https://10.217.0.18:8443/health\": dial tcp 10.217.0.18:8443: connect: connection refused" start-of-body=
Nov 22 10:42:15 crc kubenswrapper[4926]: I1122 10:42:15.382347 4926 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-console/console-f9d7485db-nsj2w" podUID="ae977eb4-8273-4dab-9e39-80c36ccd63e2" containerName="console" probeResult="failure" output="Get \"https://10.217.0.18:8443/health\": dial tcp 10.217.0.18:8443: connect: connection refused"
Nov 22 10:42:15 crc kubenswrapper[4926]: I1122 10:42:15.415027 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-wxk9m"]
Nov 22 10:42:15 crc kubenswrapper[4926]: I1122 10:42:15.416105 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-wxk9m"
Nov 22 10:42:15 crc kubenswrapper[4926]: I1122 10:42:15.422668 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-operators-dockercfg-ct8rh"
Nov 22 10:42:15 crc kubenswrapper[4926]: I1122 10:42:15.425144 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-wxk9m"]
Nov 22 10:42:15 crc kubenswrapper[4926]: I1122 10:42:15.488967 4926 patch_prober.go:28] interesting pod/router-default-5444994796-8k4sd container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Nov 22 10:42:15 crc kubenswrapper[4926]: [-]has-synced failed: reason withheld
Nov 22 10:42:15 crc kubenswrapper[4926]: [+]process-running ok
Nov 22 10:42:15 crc kubenswrapper[4926]: healthz check failed
Nov 22 10:42:15 crc kubenswrapper[4926]: I1122 10:42:15.489331 4926 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-8k4sd" podUID="12a1d8f7-8b05-42ae-bba2-13bb443eae1e" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Nov 22 10:42:15 crc kubenswrapper[4926]: I1122 10:42:15.500793 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4d60f8f8-4bda-476b-b339-e29ec2713912-utilities\") pod \"redhat-operators-wxk9m\" (UID: \"4d60f8f8-4bda-476b-b339-e29ec2713912\") " pod="openshift-marketplace/redhat-operators-wxk9m"
Nov 22 10:42:15 crc kubenswrapper[4926]: I1122 10:42:15.500850 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4n575\" (UniqueName: \"kubernetes.io/projected/4d60f8f8-4bda-476b-b339-e29ec2713912-kube-api-access-4n575\") pod \"redhat-operators-wxk9m\" (UID: \"4d60f8f8-4bda-476b-b339-e29ec2713912\") " pod="openshift-marketplace/redhat-operators-wxk9m"
Nov 22 10:42:15 crc kubenswrapper[4926]: I1122 10:42:15.500914 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4d60f8f8-4bda-476b-b339-e29ec2713912-catalog-content\") pod \"redhat-operators-wxk9m\" (UID: \"4d60f8f8-4bda-476b-b339-e29ec2713912\") " pod="openshift-marketplace/redhat-operators-wxk9m"
Nov 22 10:42:15 crc kubenswrapper[4926]: I1122 10:42:15.570530 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-7zp2q"
Nov 22 10:42:15 crc kubenswrapper[4926]: I1122 10:42:15.582876 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-7zp2q"
Nov 22 10:42:15 crc kubenswrapper[4926]: I1122 10:42:15.597951 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-mcdg4"]
Nov 22 10:42:15 crc kubenswrapper[4926]: I1122 10:42:15.602411 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-authentication/oauth-openshift-558db77b4-g6m6g"
Nov 22 10:42:15 crc kubenswrapper[4926]: I1122 10:42:15.602895 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4d60f8f8-4bda-476b-b339-e29ec2713912-utilities\") pod \"redhat-operators-wxk9m\" (UID: \"4d60f8f8-4bda-476b-b339-e29ec2713912\") " pod="openshift-marketplace/redhat-operators-wxk9m"
Nov 22 10:42:15 crc kubenswrapper[4926]: I1122 10:42:15.602922 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4n575\" (UniqueName: \"kubernetes.io/projected/4d60f8f8-4bda-476b-b339-e29ec2713912-kube-api-access-4n575\") pod \"redhat-operators-wxk9m\" (UID: \"4d60f8f8-4bda-476b-b339-e29ec2713912\") " pod="openshift-marketplace/redhat-operators-wxk9m"
Nov 22 10:42:15 crc kubenswrapper[4926]: I1122 10:42:15.602981 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4d60f8f8-4bda-476b-b339-e29ec2713912-catalog-content\") pod \"redhat-operators-wxk9m\" (UID: \"4d60f8f8-4bda-476b-b339-e29ec2713912\") " pod="openshift-marketplace/redhat-operators-wxk9m"
Nov 22 10:42:15 crc kubenswrapper[4926]: I1122 10:42:15.603088 4926 patch_prober.go:28] interesting pod/downloads-7954f5f757-8l5sv container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.21:8080/\": dial tcp 10.217.0.21:8080: connect: connection refused" start-of-body=
Nov 22 10:42:15 crc kubenswrapper[4926]: I1122 10:42:15.603122 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-8l5sv" podUID="6e0f47bd-848d-4619-a2f9-eb503d04e2e0" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.21:8080/\": dial tcp 10.217.0.21:8080: connect: connection refused"
Nov 22 10:42:15 crc kubenswrapper[4926]: I1122 10:42:15.603328 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4d60f8f8-4bda-476b-b339-e29ec2713912-catalog-content\") pod \"redhat-operators-wxk9m\" (UID: \"4d60f8f8-4bda-476b-b339-e29ec2713912\") " pod="openshift-marketplace/redhat-operators-wxk9m"
Nov 22 10:42:15 crc kubenswrapper[4926]: I1122 10:42:15.604124 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4d60f8f8-4bda-476b-b339-e29ec2713912-utilities\") pod \"redhat-operators-wxk9m\" (UID: \"4d60f8f8-4bda-476b-b339-e29ec2713912\") " pod="openshift-marketplace/redhat-operators-wxk9m"
Nov 22 10:42:15 crc kubenswrapper[4926]: I1122 10:42:15.604369 4926 patch_prober.go:28] interesting pod/downloads-7954f5f757-8l5sv container/download-server namespace/openshift-console: Liveness probe status=failure output="Get \"http://10.217.0.21:8080/\": dial tcp 10.217.0.21:8080: connect: connection refused" start-of-body=
Nov 22 10:42:15 crc kubenswrapper[4926]: I1122 10:42:15.604393 4926 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console/downloads-7954f5f757-8l5sv" podUID="6e0f47bd-848d-4619-a2f9-eb503d04e2e0" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.21:8080/\": dial tcp 10.217.0.21:8080: connect: connection refused"
Nov 22 10:42:15 crc kubenswrapper[4926]: W1122 10:42:15.611221 4926 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod72e5abe7_12f0_4180_9d5c_9d55fe800bc5.slice/crio-7b3583df78b38abf491dbef737567b4bb4ed575f4695ddad396222cc002edc32 WatchSource:0}: Error finding container 7b3583df78b38abf491dbef737567b4bb4ed575f4695ddad396222cc002edc32: Status 404 returned error can't find the container with id 7b3583df78b38abf491dbef737567b4bb4ed575f4695ddad396222cc002edc32
Nov 22 10:42:15 crc kubenswrapper[4926]: I1122 10:42:15.625117 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4n575\" (UniqueName: \"kubernetes.io/projected/4d60f8f8-4bda-476b-b339-e29ec2713912-kube-api-access-4n575\") pod \"redhat-operators-wxk9m\" (UID: \"4d60f8f8-4bda-476b-b339-e29ec2713912\") " pod="openshift-marketplace/redhat-operators-wxk9m"
Nov 22 10:42:15 crc kubenswrapper[4926]: I1122 10:42:15.750281 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-apiserver/apiserver-76f77b778f-d4vh5"
Nov 22 10:42:15 crc kubenswrapper[4926]: I1122 10:42:15.751053 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-brhnh"
Nov 22 10:42:15 crc kubenswrapper[4926]: I1122 10:42:15.766042 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-apiserver/apiserver-76f77b778f-d4vh5"
Nov 22 10:42:15 crc kubenswrapper[4926]: I1122 10:42:15.784193 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-wxk9m"
Nov 22 10:42:15 crc kubenswrapper[4926]: I1122 10:42:15.844943 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-q4zdr"]
Nov 22 10:42:15 crc kubenswrapper[4926]: I1122 10:42:15.846753 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-q4zdr"
Nov 22 10:42:15 crc kubenswrapper[4926]: I1122 10:42:15.862496 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-q4zdr"]
Nov 22 10:42:15 crc kubenswrapper[4926]: I1122 10:42:15.942878 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1ddd6078-62c2-454b-8a1c-68ea18b0a7ef-catalog-content\") pod \"redhat-operators-q4zdr\" (UID: \"1ddd6078-62c2-454b-8a1c-68ea18b0a7ef\") " pod="openshift-marketplace/redhat-operators-q4zdr"
Nov 22 10:42:15 crc kubenswrapper[4926]: I1122 10:42:15.943155 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1ddd6078-62c2-454b-8a1c-68ea18b0a7ef-utilities\") pod \"redhat-operators-q4zdr\" (UID: \"1ddd6078-62c2-454b-8a1c-68ea18b0a7ef\") " pod="openshift-marketplace/redhat-operators-q4zdr"
Nov 22 10:42:15 crc kubenswrapper[4926]: I1122 10:42:15.943196 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-l88q7\" (UniqueName: \"kubernetes.io/projected/1ddd6078-62c2-454b-8a1c-68ea18b0a7ef-kube-api-access-l88q7\") pod \"redhat-operators-q4zdr\" (UID: \"1ddd6078-62c2-454b-8a1c-68ea18b0a7ef\") " pod="openshift-marketplace/redhat-operators-q4zdr"
Nov 22 10:42:16 crc kubenswrapper[4926]: I1122 10:42:16.044709 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1ddd6078-62c2-454b-8a1c-68ea18b0a7ef-utilities\") pod \"redhat-operators-q4zdr\" (UID: \"1ddd6078-62c2-454b-8a1c-68ea18b0a7ef\") " pod="openshift-marketplace/redhat-operators-q4zdr"
Nov 22 10:42:16 crc kubenswrapper[4926]: I1122 10:42:16.044770 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-l88q7\" (UniqueName: \"kubernetes.io/projected/1ddd6078-62c2-454b-8a1c-68ea18b0a7ef-kube-api-access-l88q7\") pod \"redhat-operators-q4zdr\" (UID: \"1ddd6078-62c2-454b-8a1c-68ea18b0a7ef\") " pod="openshift-marketplace/redhat-operators-q4zdr"
Nov 22 10:42:16 crc kubenswrapper[4926]: I1122 10:42:16.044860 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1ddd6078-62c2-454b-8a1c-68ea18b0a7ef-catalog-content\") pod \"redhat-operators-q4zdr\" (UID: \"1ddd6078-62c2-454b-8a1c-68ea18b0a7ef\") " pod="openshift-marketplace/redhat-operators-q4zdr"
Nov 22 10:42:16 crc kubenswrapper[4926]: I1122 10:42:16.051127 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1ddd6078-62c2-454b-8a1c-68ea18b0a7ef-catalog-content\") pod \"redhat-operators-q4zdr\" (UID: \"1ddd6078-62c2-454b-8a1c-68ea18b0a7ef\") " pod="openshift-marketplace/redhat-operators-q4zdr"
Nov 22 10:42:16 crc kubenswrapper[4926]: I1122 10:42:16.060528 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1ddd6078-62c2-454b-8a1c-68ea18b0a7ef-utilities\") pod \"redhat-operators-q4zdr\" (UID: \"1ddd6078-62c2-454b-8a1c-68ea18b0a7ef\") " pod="openshift-marketplace/redhat-operators-q4zdr"
Nov 22 10:42:16 crc kubenswrapper[4926]: I1122 10:42:16.099109 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-l88q7\" (UniqueName: \"kubernetes.io/projected/1ddd6078-62c2-454b-8a1c-68ea18b0a7ef-kube-api-access-l88q7\") pod \"redhat-operators-q4zdr\" (UID: \"1ddd6078-62c2-454b-8a1c-68ea18b0a7ef\") " pod="openshift-marketplace/redhat-operators-q4zdr"
Nov 22 10:42:16 crc kubenswrapper[4926]: I1122 10:42:16.182269 4926 generic.go:334] "Generic (PLEG): container finished" podID="72e5abe7-12f0-4180-9d5c-9d55fe800bc5" containerID="437a77cb04006d727d4132dc98dae799930c277e416c9f4bc3f6ca44c97143cb" exitCode=0
Nov 22 10:42:16 crc kubenswrapper[4926]: I1122 10:42:16.182395 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-mcdg4" event={"ID":"72e5abe7-12f0-4180-9d5c-9d55fe800bc5","Type":"ContainerDied","Data":"437a77cb04006d727d4132dc98dae799930c277e416c9f4bc3f6ca44c97143cb"}
Nov 22 10:42:16 crc kubenswrapper[4926]: I1122 10:42:16.182428 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-mcdg4" event={"ID":"72e5abe7-12f0-4180-9d5c-9d55fe800bc5","Type":"ContainerStarted","Data":"7b3583df78b38abf491dbef737567b4bb4ed575f4695ddad396222cc002edc32"}
Nov 22 10:42:16 crc kubenswrapper[4926]: I1122 10:42:16.202532 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/marketplace-operator-79b997595-cm8rb"
Nov 22 10:42:16 crc kubenswrapper[4926]: I1122 10:42:16.243393 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-q4zdr"
Nov 22 10:42:16 crc kubenswrapper[4926]: I1122 10:42:16.260342 4926 generic.go:334] "Generic (PLEG): container finished" podID="8ea24004-1068-4fad-a694-c92251db240d" containerID="bae30c0089473e138c3f630184a70275adbf84b169ac4de7e18f4cae806a60b1" exitCode=0
Nov 22 10:42:16 crc kubenswrapper[4926]: I1122 10:42:16.260483 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-x6q97" event={"ID":"8ea24004-1068-4fad-a694-c92251db240d","Type":"ContainerDied","Data":"bae30c0089473e138c3f630184a70275adbf84b169ac4de7e18f4cae806a60b1"}
Nov 22 10:42:16 crc kubenswrapper[4926]: I1122 10:42:16.372375 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"404e486e-98a0-4803-bb70-2db849a922ea","Type":"ContainerStarted","Data":"5e63712129256991e3681fed7ba3d2105a3d47a31d038556570e438d6a6e6df4"}
Nov 22 10:42:16 crc kubenswrapper[4926]: I1122 10:42:16.372414 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"404e486e-98a0-4803-bb70-2db849a922ea","Type":"ContainerStarted","Data":"778e7fe59cf71c463fd495aec6e85bd183ba53851f4e3b98bf15f8641fac2464"}
Nov 22 10:42:16 crc kubenswrapper[4926]: I1122 10:42:16.453413 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-controller-manager/revision-pruner-9-crc" podStartSLOduration=2.453395488 podStartE2EDuration="2.453395488s" podCreationTimestamp="2025-11-22 10:42:14 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 10:42:16.452298066 +0000 UTC m=+156.753903353" watchObservedRunningTime="2025-11-22 10:42:16.453395488 +0000 UTC m=+156.755000775"
Nov 22 10:42:16 crc kubenswrapper[4926]: I1122 10:42:16.453997 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-879f6c89f-h2qnt" podStartSLOduration=4.453992245 podStartE2EDuration="4.453992245s" podCreationTimestamp="2025-11-22 10:42:12 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 10:42:16.407825273 +0000 UTC m=+156.709430560" watchObservedRunningTime="2025-11-22 10:42:16.453992245 +0000 UTC m=+156.755597532"
Nov 22 10:42:16 crc kubenswrapper[4926]: I1122 10:42:16.482210 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ingress/router-default-5444994796-8k4sd"
Nov 22 10:42:16 crc kubenswrapper[4926]: I1122 10:42:16.487695 4926 patch_prober.go:28] interesting pod/router-default-5444994796-8k4sd container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Nov 22 10:42:16 crc kubenswrapper[4926]: [-]has-synced failed: reason withheld
Nov 22 10:42:16 crc kubenswrapper[4926]: [+]process-running ok
Nov 22 10:42:16 crc kubenswrapper[4926]: healthz check failed
Nov 22 10:42:16 crc kubenswrapper[4926]: I1122 10:42:16.487785 4926 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-8k4sd" podUID="12a1d8f7-8b05-42ae-bba2-13bb443eae1e" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Nov 22 10:42:16 crc kubenswrapper[4926]:
I1122 10:42:16.523417 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/revision-pruner-8-crc"] Nov 22 10:42:16 crc kubenswrapper[4926]: I1122 10:42:16.524284 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 22 10:42:16 crc kubenswrapper[4926]: I1122 10:42:16.525958 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver"/"installer-sa-dockercfg-5pr6n" Nov 22 10:42:16 crc kubenswrapper[4926]: I1122 10:42:16.545459 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver"/"kube-root-ca.crt" Nov 22 10:42:16 crc kubenswrapper[4926]: I1122 10:42:16.551469 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-8-crc"] Nov 22 10:42:16 crc kubenswrapper[4926]: I1122 10:42:16.573231 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/14bd2176-9eab-42ea-b510-b59e08e5c89a-kubelet-dir\") pod \"revision-pruner-8-crc\" (UID: \"14bd2176-9eab-42ea-b510-b59e08e5c89a\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 22 10:42:16 crc kubenswrapper[4926]: I1122 10:42:16.573284 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/14bd2176-9eab-42ea-b510-b59e08e5c89a-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"14bd2176-9eab-42ea-b510-b59e08e5c89a\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 22 10:42:16 crc kubenswrapper[4926]: I1122 10:42:16.675482 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/14bd2176-9eab-42ea-b510-b59e08e5c89a-kubelet-dir\") pod \"revision-pruner-8-crc\" (UID: \"14bd2176-9eab-42ea-b510-b59e08e5c89a\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 22 10:42:16 crc kubenswrapper[4926]: I1122 10:42:16.675853 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/14bd2176-9eab-42ea-b510-b59e08e5c89a-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"14bd2176-9eab-42ea-b510-b59e08e5c89a\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 22 10:42:16 crc kubenswrapper[4926]: I1122 10:42:16.676219 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/14bd2176-9eab-42ea-b510-b59e08e5c89a-kubelet-dir\") pod \"revision-pruner-8-crc\" (UID: \"14bd2176-9eab-42ea-b510-b59e08e5c89a\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 22 10:42:16 crc kubenswrapper[4926]: I1122 10:42:16.721809 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/14bd2176-9eab-42ea-b510-b59e08e5c89a-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"14bd2176-9eab-42ea-b510-b59e08e5c89a\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 22 10:42:16 crc kubenswrapper[4926]: I1122 10:42:16.763425 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-wxk9m"] Nov 22 10:42:16 crc kubenswrapper[4926]: I1122 10:42:16.880644 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-q4zdr"] Nov 
22 10:42:16 crc kubenswrapper[4926]: I1122 10:42:16.890177 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 22 10:42:17 crc kubenswrapper[4926]: I1122 10:42:17.397444 4926 generic.go:334] "Generic (PLEG): container finished" podID="4d60f8f8-4bda-476b-b339-e29ec2713912" containerID="8de13323548baca110d84343130dc7687a45bf6732e90eb10db3246abe80b5f4" exitCode=0 Nov 22 10:42:17 crc kubenswrapper[4926]: I1122 10:42:17.397761 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wxk9m" event={"ID":"4d60f8f8-4bda-476b-b339-e29ec2713912","Type":"ContainerDied","Data":"8de13323548baca110d84343130dc7687a45bf6732e90eb10db3246abe80b5f4"} Nov 22 10:42:17 crc kubenswrapper[4926]: I1122 10:42:17.397802 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wxk9m" event={"ID":"4d60f8f8-4bda-476b-b339-e29ec2713912","Type":"ContainerStarted","Data":"ceaca808fb030ec63ac8dca9d0411d5fb272e9c6f58593f973d59614d181dd85"} Nov 22 10:42:17 crc kubenswrapper[4926]: I1122 10:42:17.423741 4926 generic.go:334] "Generic (PLEG): container finished" podID="404e486e-98a0-4803-bb70-2db849a922ea" containerID="5e63712129256991e3681fed7ba3d2105a3d47a31d038556570e438d6a6e6df4" exitCode=0 Nov 22 10:42:17 crc kubenswrapper[4926]: I1122 10:42:17.423843 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"404e486e-98a0-4803-bb70-2db849a922ea","Type":"ContainerDied","Data":"5e63712129256991e3681fed7ba3d2105a3d47a31d038556570e438d6a6e6df4"} Nov 22 10:42:17 crc kubenswrapper[4926]: I1122 10:42:17.431225 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-q4zdr" event={"ID":"1ddd6078-62c2-454b-8a1c-68ea18b0a7ef","Type":"ContainerStarted","Data":"54f14d7f7bc8aab71b973bcfd4f9453e59e9c9c49f86c5ce693bb23f81cca8cb"} Nov 22 10:42:17 crc kubenswrapper[4926]: I1122 10:42:17.431287 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-q4zdr" event={"ID":"1ddd6078-62c2-454b-8a1c-68ea18b0a7ef","Type":"ContainerStarted","Data":"e8f57f9e6cd789fc91d8904c14dc5491420480e03935accd1a09ffc7ef6c32cf"} Nov 22 10:42:17 crc kubenswrapper[4926]: I1122 10:42:17.474673 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-8-crc"] Nov 22 10:42:17 crc kubenswrapper[4926]: I1122 10:42:17.477484 4926 patch_prober.go:28] interesting pod/router-default-5444994796-8k4sd container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 22 10:42:17 crc kubenswrapper[4926]: [-]has-synced failed: reason withheld Nov 22 10:42:17 crc kubenswrapper[4926]: [+]process-running ok Nov 22 10:42:17 crc kubenswrapper[4926]: healthz check failed Nov 22 10:42:17 crc kubenswrapper[4926]: I1122 10:42:17.477568 4926 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-8k4sd" podUID="12a1d8f7-8b05-42ae-bba2-13bb443eae1e" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 22 10:42:18 crc kubenswrapper[4926]: I1122 10:42:18.475227 4926 generic.go:334] "Generic (PLEG): container finished" podID="1ddd6078-62c2-454b-8a1c-68ea18b0a7ef" 
containerID="54f14d7f7bc8aab71b973bcfd4f9453e59e9c9c49f86c5ce693bb23f81cca8cb" exitCode=0 Nov 22 10:42:18 crc kubenswrapper[4926]: I1122 10:42:18.476426 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-q4zdr" event={"ID":"1ddd6078-62c2-454b-8a1c-68ea18b0a7ef","Type":"ContainerDied","Data":"54f14d7f7bc8aab71b973bcfd4f9453e59e9c9c49f86c5ce693bb23f81cca8cb"} Nov 22 10:42:18 crc kubenswrapper[4926]: I1122 10:42:18.478894 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-ingress/router-default-5444994796-8k4sd" Nov 22 10:42:18 crc kubenswrapper[4926]: I1122 10:42:18.483453 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ingress/router-default-5444994796-8k4sd" Nov 22 10:42:18 crc kubenswrapper[4926]: I1122 10:42:18.504320 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"14bd2176-9eab-42ea-b510-b59e08e5c89a","Type":"ContainerStarted","Data":"86e3380054ef2a5986b4205216ad1eb4ff00f9a73b34375e566dcdeb842c1353"} Nov 22 10:42:18 crc kubenswrapper[4926]: I1122 10:42:18.555998 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-dns/dns-default-mwqpv" Nov 22 10:42:19 crc kubenswrapper[4926]: I1122 10:42:19.024891 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 22 10:42:19 crc kubenswrapper[4926]: I1122 10:42:19.167095 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/404e486e-98a0-4803-bb70-2db849a922ea-kube-api-access\") pod \"404e486e-98a0-4803-bb70-2db849a922ea\" (UID: \"404e486e-98a0-4803-bb70-2db849a922ea\") " Nov 22 10:42:19 crc kubenswrapper[4926]: I1122 10:42:19.167150 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/404e486e-98a0-4803-bb70-2db849a922ea-kubelet-dir\") pod \"404e486e-98a0-4803-bb70-2db849a922ea\" (UID: \"404e486e-98a0-4803-bb70-2db849a922ea\") " Nov 22 10:42:19 crc kubenswrapper[4926]: I1122 10:42:19.167526 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/404e486e-98a0-4803-bb70-2db849a922ea-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "404e486e-98a0-4803-bb70-2db849a922ea" (UID: "404e486e-98a0-4803-bb70-2db849a922ea"). InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 22 10:42:19 crc kubenswrapper[4926]: I1122 10:42:19.172515 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/404e486e-98a0-4803-bb70-2db849a922ea-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "404e486e-98a0-4803-bb70-2db849a922ea" (UID: "404e486e-98a0-4803-bb70-2db849a922ea"). InnerVolumeSpecName "kube-api-access". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:42:19 crc kubenswrapper[4926]: I1122 10:42:19.269105 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/404e486e-98a0-4803-bb70-2db849a922ea-kube-api-access\") on node \"crc\" DevicePath \"\"" Nov 22 10:42:19 crc kubenswrapper[4926]: I1122 10:42:19.269137 4926 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/404e486e-98a0-4803-bb70-2db849a922ea-kubelet-dir\") on node \"crc\" DevicePath \"\"" Nov 22 10:42:19 crc kubenswrapper[4926]: I1122 10:42:19.580042 4926 generic.go:334] "Generic (PLEG): container finished" podID="14bd2176-9eab-42ea-b510-b59e08e5c89a" containerID="b8d2601f83661ff9000b7ad5726e2b5b9796aec86484a743387bd60e786cf471" exitCode=0 Nov 22 10:42:19 crc kubenswrapper[4926]: I1122 10:42:19.580117 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"14bd2176-9eab-42ea-b510-b59e08e5c89a","Type":"ContainerDied","Data":"b8d2601f83661ff9000b7ad5726e2b5b9796aec86484a743387bd60e786cf471"} Nov 22 10:42:19 crc kubenswrapper[4926]: I1122 10:42:19.587929 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"404e486e-98a0-4803-bb70-2db849a922ea","Type":"ContainerDied","Data":"778e7fe59cf71c463fd495aec6e85bd183ba53851f4e3b98bf15f8641fac2464"} Nov 22 10:42:19 crc kubenswrapper[4926]: I1122 10:42:19.587983 4926 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="778e7fe59cf71c463fd495aec6e85bd183ba53851f4e3b98bf15f8641fac2464" Nov 22 10:42:19 crc kubenswrapper[4926]: I1122 10:42:19.587951 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 22 10:42:21 crc kubenswrapper[4926]: I1122 10:42:21.037144 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 22 10:42:21 crc kubenswrapper[4926]: I1122 10:42:21.206083 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/14bd2176-9eab-42ea-b510-b59e08e5c89a-kubelet-dir\") pod \"14bd2176-9eab-42ea-b510-b59e08e5c89a\" (UID: \"14bd2176-9eab-42ea-b510-b59e08e5c89a\") " Nov 22 10:42:21 crc kubenswrapper[4926]: I1122 10:42:21.206167 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/14bd2176-9eab-42ea-b510-b59e08e5c89a-kube-api-access\") pod \"14bd2176-9eab-42ea-b510-b59e08e5c89a\" (UID: \"14bd2176-9eab-42ea-b510-b59e08e5c89a\") " Nov 22 10:42:21 crc kubenswrapper[4926]: I1122 10:42:21.206416 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/14bd2176-9eab-42ea-b510-b59e08e5c89a-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "14bd2176-9eab-42ea-b510-b59e08e5c89a" (UID: "14bd2176-9eab-42ea-b510-b59e08e5c89a"). InnerVolumeSpecName "kubelet-dir". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 22 10:42:21 crc kubenswrapper[4926]: I1122 10:42:21.206691 4926 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/14bd2176-9eab-42ea-b510-b59e08e5c89a-kubelet-dir\") on node \"crc\" DevicePath \"\"" Nov 22 10:42:21 crc kubenswrapper[4926]: I1122 10:42:21.226942 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/14bd2176-9eab-42ea-b510-b59e08e5c89a-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "14bd2176-9eab-42ea-b510-b59e08e5c89a" (UID: "14bd2176-9eab-42ea-b510-b59e08e5c89a"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:42:21 crc kubenswrapper[4926]: I1122 10:42:21.308215 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/14bd2176-9eab-42ea-b510-b59e08e5c89a-kube-api-access\") on node \"crc\" DevicePath \"\"" Nov 22 10:42:21 crc kubenswrapper[4926]: I1122 10:42:21.636186 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"14bd2176-9eab-42ea-b510-b59e08e5c89a","Type":"ContainerDied","Data":"86e3380054ef2a5986b4205216ad1eb4ff00f9a73b34375e566dcdeb842c1353"} Nov 22 10:42:21 crc kubenswrapper[4926]: I1122 10:42:21.636236 4926 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="86e3380054ef2a5986b4205216ad1eb4ff00f9a73b34375e566dcdeb842c1353" Nov 22 10:42:21 crc kubenswrapper[4926]: I1122 10:42:21.636303 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 22 10:42:21 crc kubenswrapper[4926]: E1122 10:42:21.678363 4926 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-pod14bd2176_9eab_42ea_b510_b59e08e5c89a.slice/crio-86e3380054ef2a5986b4205216ad1eb4ff00f9a73b34375e566dcdeb842c1353\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-pod14bd2176_9eab_42ea_b510_b59e08e5c89a.slice\": RecentStats: unable to find data in memory cache]" Nov 22 10:42:22 crc kubenswrapper[4926]: I1122 10:42:22.422471 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/c42b6f47-b1a4-4fee-8681-3b5288370323-metrics-certs\") pod \"network-metrics-daemon-jfbf4\" (UID: \"c42b6f47-b1a4-4fee-8681-3b5288370323\") " pod="openshift-multus/network-metrics-daemon-jfbf4" Nov 22 10:42:22 crc kubenswrapper[4926]: I1122 10:42:22.451270 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/c42b6f47-b1a4-4fee-8681-3b5288370323-metrics-certs\") pod \"network-metrics-daemon-jfbf4\" (UID: \"c42b6f47-b1a4-4fee-8681-3b5288370323\") " pod="openshift-multus/network-metrics-daemon-jfbf4" Nov 22 10:42:22 crc kubenswrapper[4926]: I1122 10:42:22.732809 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-jfbf4" Nov 22 10:42:23 crc kubenswrapper[4926]: I1122 10:42:23.207065 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/network-metrics-daemon-jfbf4"] Nov 22 10:42:24 crc kubenswrapper[4926]: I1122 10:42:24.605274 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-879f6c89f-h2qnt" Nov 22 10:42:24 crc kubenswrapper[4926]: I1122 10:42:24.611434 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-879f6c89f-h2qnt" Nov 22 10:42:25 crc kubenswrapper[4926]: I1122 10:42:25.433491 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-console/console-f9d7485db-nsj2w" Nov 22 10:42:25 crc kubenswrapper[4926]: I1122 10:42:25.437383 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/console-f9d7485db-nsj2w" Nov 22 10:42:25 crc kubenswrapper[4926]: I1122 10:42:25.599591 4926 patch_prober.go:28] interesting pod/downloads-7954f5f757-8l5sv container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.21:8080/\": dial tcp 10.217.0.21:8080: connect: connection refused" start-of-body= Nov 22 10:42:25 crc kubenswrapper[4926]: I1122 10:42:25.599699 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-8l5sv" podUID="6e0f47bd-848d-4619-a2f9-eb503d04e2e0" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.21:8080/\": dial tcp 10.217.0.21:8080: connect: connection refused" Nov 22 10:42:25 crc kubenswrapper[4926]: I1122 10:42:25.600073 4926 patch_prober.go:28] interesting pod/downloads-7954f5f757-8l5sv container/download-server namespace/openshift-console: Liveness probe status=failure output="Get \"http://10.217.0.21:8080/\": dial tcp 10.217.0.21:8080: connect: connection refused" start-of-body= Nov 22 10:42:25 crc kubenswrapper[4926]: I1122 10:42:25.600116 4926 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console/downloads-7954f5f757-8l5sv" podUID="6e0f47bd-848d-4619-a2f9-eb503d04e2e0" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.21:8080/\": dial tcp 10.217.0.21:8080: connect: connection refused" Nov 22 10:42:33 crc kubenswrapper[4926]: I1122 10:42:33.965032 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-image-registry/image-registry-697d97f7c8-wlnj9" Nov 22 10:42:35 crc kubenswrapper[4926]: I1122 10:42:35.600000 4926 patch_prober.go:28] interesting pod/downloads-7954f5f757-8l5sv container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.21:8080/\": dial tcp 10.217.0.21:8080: connect: connection refused" start-of-body= Nov 22 10:42:35 crc kubenswrapper[4926]: I1122 10:42:35.600095 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-8l5sv" podUID="6e0f47bd-848d-4619-a2f9-eb503d04e2e0" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.21:8080/\": dial tcp 10.217.0.21:8080: connect: connection refused" Nov 22 10:42:35 crc kubenswrapper[4926]: I1122 10:42:35.600999 4926 patch_prober.go:28] interesting pod/downloads-7954f5f757-8l5sv container/download-server namespace/openshift-console: Liveness probe status=failure output="Get 
\"http://10.217.0.21:8080/\": dial tcp 10.217.0.21:8080: connect: connection refused" start-of-body= Nov 22 10:42:35 crc kubenswrapper[4926]: I1122 10:42:35.601033 4926 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console/downloads-7954f5f757-8l5sv" podUID="6e0f47bd-848d-4619-a2f9-eb503d04e2e0" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.21:8080/\": dial tcp 10.217.0.21:8080: connect: connection refused" Nov 22 10:42:35 crc kubenswrapper[4926]: I1122 10:42:35.601081 4926 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-console/downloads-7954f5f757-8l5sv" Nov 22 10:42:35 crc kubenswrapper[4926]: I1122 10:42:35.601776 4926 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="download-server" containerStatusID={"Type":"cri-o","ID":"4808990dd4c5d9d82845666b187d49bdb8e2017d77bdcb7f8436e88a90e530df"} pod="openshift-console/downloads-7954f5f757-8l5sv" containerMessage="Container download-server failed liveness probe, will be restarted" Nov 22 10:42:35 crc kubenswrapper[4926]: I1122 10:42:35.601819 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-console/downloads-7954f5f757-8l5sv" podUID="6e0f47bd-848d-4619-a2f9-eb503d04e2e0" containerName="download-server" containerID="cri-o://4808990dd4c5d9d82845666b187d49bdb8e2017d77bdcb7f8436e88a90e530df" gracePeriod=2 Nov 22 10:42:35 crc kubenswrapper[4926]: I1122 10:42:35.602437 4926 patch_prober.go:28] interesting pod/downloads-7954f5f757-8l5sv container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.21:8080/\": dial tcp 10.217.0.21:8080: connect: connection refused" start-of-body= Nov 22 10:42:35 crc kubenswrapper[4926]: I1122 10:42:35.602504 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-8l5sv" podUID="6e0f47bd-848d-4619-a2f9-eb503d04e2e0" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.21:8080/\": dial tcp 10.217.0.21:8080: connect: connection refused" Nov 22 10:42:35 crc kubenswrapper[4926]: I1122 10:42:35.746255 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/network-metrics-daemon-jfbf4" event={"ID":"c42b6f47-b1a4-4fee-8681-3b5288370323","Type":"ContainerStarted","Data":"2a2470f69972bab61814ad30fcaa9a1f038b1112b6f2773a9c01768ca8740783"} Nov 22 10:42:37 crc kubenswrapper[4926]: I1122 10:42:37.761391 4926 generic.go:334] "Generic (PLEG): container finished" podID="6e0f47bd-848d-4619-a2f9-eb503d04e2e0" containerID="4808990dd4c5d9d82845666b187d49bdb8e2017d77bdcb7f8436e88a90e530df" exitCode=0 Nov 22 10:42:37 crc kubenswrapper[4926]: I1122 10:42:37.761458 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-7954f5f757-8l5sv" event={"ID":"6e0f47bd-848d-4619-a2f9-eb503d04e2e0","Type":"ContainerDied","Data":"4808990dd4c5d9d82845666b187d49bdb8e2017d77bdcb7f8436e88a90e530df"} Nov 22 10:42:39 crc kubenswrapper[4926]: I1122 10:42:39.661390 4926 patch_prober.go:28] interesting pod/machine-config-daemon-xr9nd container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 22 10:42:39 crc kubenswrapper[4926]: I1122 10:42:39.661924 4926 prober.go:107] "Probe failed" probeType="Liveness" 
pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" podUID="d4977b14-85c3-4141-9b15-1768f09e8d27" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 22 10:42:45 crc kubenswrapper[4926]: I1122 10:42:45.601325 4926 patch_prober.go:28] interesting pod/downloads-7954f5f757-8l5sv container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.21:8080/\": dial tcp 10.217.0.21:8080: connect: connection refused" start-of-body= Nov 22 10:42:45 crc kubenswrapper[4926]: I1122 10:42:45.601666 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-8l5sv" podUID="6e0f47bd-848d-4619-a2f9-eb503d04e2e0" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.21:8080/\": dial tcp 10.217.0.21:8080: connect: connection refused" Nov 22 10:42:46 crc kubenswrapper[4926]: I1122 10:42:46.485261 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-h55df" Nov 22 10:42:49 crc kubenswrapper[4926]: I1122 10:42:49.601746 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 22 10:42:55 crc kubenswrapper[4926]: I1122 10:42:55.600330 4926 patch_prober.go:28] interesting pod/downloads-7954f5f757-8l5sv container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.21:8080/\": dial tcp 10.217.0.21:8080: connect: connection refused" start-of-body= Nov 22 10:42:55 crc kubenswrapper[4926]: I1122 10:42:55.600835 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-8l5sv" podUID="6e0f47bd-848d-4619-a2f9-eb503d04e2e0" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.21:8080/\": dial tcp 10.217.0.21:8080: connect: connection refused" Nov 22 10:43:00 crc kubenswrapper[4926]: E1122 10:43:00.621856 4926 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/community-operator-index:v4.18" Nov 22 10:43:00 crc kubenswrapper[4926]: E1122 10:43:00.622091 4926 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/community-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache 
--cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-n9sjl,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod community-operators-s7kfj_openshift-marketplace(4ada1ec1-1f41-40d3-a36d-3de32bb19c9c): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Nov 22 10:43:00 crc kubenswrapper[4926]: E1122 10:43:00.624150 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/community-operators-s7kfj" podUID="4ada1ec1-1f41-40d3-a36d-3de32bb19c9c" Nov 22 10:43:05 crc kubenswrapper[4926]: I1122 10:43:05.599707 4926 patch_prober.go:28] interesting pod/downloads-7954f5f757-8l5sv container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.21:8080/\": dial tcp 10.217.0.21:8080: connect: connection refused" start-of-body= Nov 22 10:43:05 crc kubenswrapper[4926]: I1122 10:43:05.600136 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-8l5sv" podUID="6e0f47bd-848d-4619-a2f9-eb503d04e2e0" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.21:8080/\": dial tcp 10.217.0.21:8080: connect: connection refused" Nov 22 10:43:08 crc kubenswrapper[4926]: E1122 10:43:08.220357 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"\"" pod="openshift-marketplace/community-operators-s7kfj" podUID="4ada1ec1-1f41-40d3-a36d-3de32bb19c9c" Nov 22 10:43:09 crc kubenswrapper[4926]: I1122 10:43:09.661618 4926 patch_prober.go:28] interesting pod/machine-config-daemon-xr9nd container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 22 10:43:09 crc kubenswrapper[4926]: I1122 10:43:09.662085 4926 prober.go:107] "Probe failed" probeType="Liveness" 
pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" podUID="d4977b14-85c3-4141-9b15-1768f09e8d27" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 22 10:43:09 crc kubenswrapper[4926]: I1122 10:43:09.662167 4926 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" Nov 22 10:43:09 crc kubenswrapper[4926]: I1122 10:43:09.662980 4926 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"fb54559825bae40317f4dbc23c5b4238c4681f9845053656d59b2d2b3e504e1a"} pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 22 10:43:09 crc kubenswrapper[4926]: I1122 10:43:09.663072 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" podUID="d4977b14-85c3-4141-9b15-1768f09e8d27" containerName="machine-config-daemon" containerID="cri-o://fb54559825bae40317f4dbc23c5b4238c4681f9845053656d59b2d2b3e504e1a" gracePeriod=600 Nov 22 10:43:11 crc kubenswrapper[4926]: I1122 10:43:11.987086 4926 generic.go:334] "Generic (PLEG): container finished" podID="d4977b14-85c3-4141-9b15-1768f09e8d27" containerID="fb54559825bae40317f4dbc23c5b4238c4681f9845053656d59b2d2b3e504e1a" exitCode=0 Nov 22 10:43:11 crc kubenswrapper[4926]: I1122 10:43:11.987187 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" event={"ID":"d4977b14-85c3-4141-9b15-1768f09e8d27","Type":"ContainerDied","Data":"fb54559825bae40317f4dbc23c5b4238c4681f9845053656d59b2d2b3e504e1a"} Nov 22 10:43:12 crc kubenswrapper[4926]: E1122 10:43:12.246337 4926 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/redhat-operator-index:v4.18" Nov 22 10:43:12 crc kubenswrapper[4926]: E1122 10:43:12.246498 4926 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache 
--cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-4n575,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-operators-wxk9m_openshift-marketplace(4d60f8f8-4bda-476b-b339-e29ec2713912): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Nov 22 10:43:12 crc kubenswrapper[4926]: E1122 10:43:12.247600 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/redhat-operators-wxk9m" podUID="4d60f8f8-4bda-476b-b339-e29ec2713912" Nov 22 10:43:13 crc kubenswrapper[4926]: E1122 10:43:13.625337 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-operators-wxk9m" podUID="4d60f8f8-4bda-476b-b339-e29ec2713912" Nov 22 10:43:13 crc kubenswrapper[4926]: E1122 10:43:13.701997 4926 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/community-operator-index:v4.18" Nov 22 10:43:13 crc kubenswrapper[4926]: E1122 10:43:13.702202 4926 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/community-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache 
--cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-kgbtd,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod community-operators-4864b_openshift-marketplace(c34f3883-c9b2-41fe-9a17-127918e9ef88): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Nov 22 10:43:13 crc kubenswrapper[4926]: E1122 10:43:13.703442 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/community-operators-4864b" podUID="c34f3883-c9b2-41fe-9a17-127918e9ef88" Nov 22 10:43:15 crc kubenswrapper[4926]: I1122 10:43:15.599225 4926 patch_prober.go:28] interesting pod/downloads-7954f5f757-8l5sv container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.21:8080/\": dial tcp 10.217.0.21:8080: connect: connection refused" start-of-body= Nov 22 10:43:15 crc kubenswrapper[4926]: I1122 10:43:15.599730 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-8l5sv" podUID="6e0f47bd-848d-4619-a2f9-eb503d04e2e0" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.21:8080/\": dial tcp 10.217.0.21:8080: connect: connection refused" Nov 22 10:43:16 crc kubenswrapper[4926]: E1122 10:43:16.226806 4926 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/certified-operator-index:v4.18" Nov 22 10:43:16 crc kubenswrapper[4926]: E1122 10:43:16.227116 4926 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/certified-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache 
--cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-vm4gs,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod certified-operators-2vvkj_openshift-marketplace(f0547d70-c2f4-4d26-ad73-9f65df8b1bc1): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Nov 22 10:43:16 crc kubenswrapper[4926]: E1122 10:43:16.228583 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/certified-operators-2vvkj" podUID="f0547d70-c2f4-4d26-ad73-9f65df8b1bc1" Nov 22 10:43:18 crc kubenswrapper[4926]: E1122 10:43:18.676619 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"\"" pod="openshift-marketplace/community-operators-4864b" podUID="c34f3883-c9b2-41fe-9a17-127918e9ef88" Nov 22 10:43:18 crc kubenswrapper[4926]: E1122 10:43:18.677440 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"\"" pod="openshift-marketplace/certified-operators-2vvkj" podUID="f0547d70-c2f4-4d26-ad73-9f65df8b1bc1" Nov 22 10:43:20 crc kubenswrapper[4926]: E1122 10:43:20.425301 4926 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/redhat-marketplace-index:v4.18" Nov 22 10:43:20 crc kubenswrapper[4926]: E1122 10:43:20.425812 4926 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-marketplace-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache 
--cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-dl7st,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-marketplace-x6q97_openshift-marketplace(8ea24004-1068-4fad-a694-c92251db240d): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Nov 22 10:43:20 crc kubenswrapper[4926]: E1122 10:43:20.427242 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/redhat-marketplace-x6q97" podUID="8ea24004-1068-4fad-a694-c92251db240d" Nov 22 10:43:21 crc kubenswrapper[4926]: E1122 10:43:21.006970 4926 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/certified-operator-index:v4.18" Nov 22 10:43:21 crc kubenswrapper[4926]: E1122 10:43:21.007495 4926 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/certified-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache 
--cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-ms456,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod certified-operators-c2r2n_openshift-marketplace(b8674be0-e053-4d83-9a04-008800542315): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError"
Nov 22 10:43:21 crc kubenswrapper[4926]: E1122 10:43:21.009275 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/certified-operators-c2r2n" podUID="b8674be0-e053-4d83-9a04-008800542315"
Nov 22 10:43:21 crc kubenswrapper[4926]: I1122 10:43:21.046458 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-7954f5f757-8l5sv" event={"ID":"6e0f47bd-848d-4619-a2f9-eb503d04e2e0","Type":"ContainerStarted","Data":"89ff6664622b57119540cecf3eb7e3b1201ca462cf118f1526db2ee67ea3538d"}
Nov 22 10:43:21 crc kubenswrapper[4926]: I1122 10:43:21.047759 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/downloads-7954f5f757-8l5sv"
Nov 22 10:43:21 crc kubenswrapper[4926]: I1122 10:43:21.047851 4926 patch_prober.go:28] interesting pod/downloads-7954f5f757-8l5sv container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.21:8080/\": dial tcp 10.217.0.21:8080: connect: connection refused" start-of-body=
Nov 22 10:43:21 crc kubenswrapper[4926]: I1122 10:43:21.047909 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-8l5sv" podUID="6e0f47bd-848d-4619-a2f9-eb503d04e2e0" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.21:8080/\": dial tcp 10.217.0.21:8080: connect: connection refused"
Nov 22 10:43:21 crc kubenswrapper[4926]: I1122 10:43:21.049720 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/network-metrics-daemon-jfbf4" event={"ID":"c42b6f47-b1a4-4fee-8681-3b5288370323","Type":"ContainerStarted","Data":"193a577631066ee33f81c6c4fdde887c050e82f567f50d070e79208de9e95af1"}
Nov 22 10:43:21 crc kubenswrapper[4926]: I1122 10:43:21.052489 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" event={"ID":"d4977b14-85c3-4141-9b15-1768f09e8d27","Type":"ContainerStarted","Data":"a01df6d05065d9939208f762177f66a644f719d91c6951810eab9a58a33f140e"}
Nov 22 10:43:21 crc kubenswrapper[4926]: E1122 10:43:21.054107 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"\"" pod="openshift-marketplace/certified-operators-c2r2n" podUID="b8674be0-e053-4d83-9a04-008800542315"
Nov 22 10:43:21 crc kubenswrapper[4926]: E1122 10:43:21.054348 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-marketplace-x6q97" podUID="8ea24004-1068-4fad-a694-c92251db240d"
Nov 22 10:43:22 crc kubenswrapper[4926]: I1122 10:43:22.060690 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/network-metrics-daemon-jfbf4" event={"ID":"c42b6f47-b1a4-4fee-8681-3b5288370323","Type":"ContainerStarted","Data":"c51b58cfbcf7f9c81d4b71977e3c121332bb3a1a2168554d3fcf410bb7a942d6"}
Nov 22 10:43:22 crc kubenswrapper[4926]: I1122 10:43:22.062070 4926 patch_prober.go:28] interesting pod/downloads-7954f5f757-8l5sv container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.21:8080/\": dial tcp 10.217.0.21:8080: connect: connection refused" start-of-body=
Nov 22 10:43:22 crc kubenswrapper[4926]: I1122 10:43:22.062180 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-8l5sv" podUID="6e0f47bd-848d-4619-a2f9-eb503d04e2e0" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.21:8080/\": dial tcp 10.217.0.21:8080: connect: connection refused"
Nov 22 10:43:23 crc kubenswrapper[4926]: I1122 10:43:23.067694 4926 patch_prober.go:28] interesting pod/downloads-7954f5f757-8l5sv container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.21:8080/\": dial tcp 10.217.0.21:8080: connect: connection refused" start-of-body=
Nov 22 10:43:23 crc kubenswrapper[4926]: I1122 10:43:23.067758 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-8l5sv" podUID="6e0f47bd-848d-4619-a2f9-eb503d04e2e0" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.21:8080/\": dial tcp 10.217.0.21:8080: connect: connection refused"
Nov 22 10:43:23 crc kubenswrapper[4926]: E1122 10:43:23.763601 4926 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/redhat-marketplace-index:v4.18"
Nov 22 10:43:23 crc kubenswrapper[4926]: E1122 10:43:23.764135 4926 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-marketplace-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-9f557,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-marketplace-mcdg4_openshift-marketplace(72e5abe7-12f0-4180-9d5c-9d55fe800bc5): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError"
Nov 22 10:43:23 crc kubenswrapper[4926]: E1122 10:43:23.765262 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/redhat-marketplace-mcdg4" podUID="72e5abe7-12f0-4180-9d5c-9d55fe800bc5"
Nov 22 10:43:24 crc kubenswrapper[4926]: E1122 10:43:24.074897 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-marketplace-mcdg4" podUID="72e5abe7-12f0-4180-9d5c-9d55fe800bc5"
Nov 22 10:43:24 crc kubenswrapper[4926]: E1122 10:43:24.900297 4926 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/redhat-operator-index:v4.18"
Nov 22 10:43:24 crc kubenswrapper[4926]: E1122 10:43:24.900467 4926 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-l88q7,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-operators-q4zdr_openshift-marketplace(1ddd6078-62c2-454b-8a1c-68ea18b0a7ef): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError"
Nov 22 10:43:24 crc kubenswrapper[4926]: E1122 10:43:24.901638 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/redhat-operators-q4zdr" podUID="1ddd6078-62c2-454b-8a1c-68ea18b0a7ef"
Nov 22 10:43:25 crc kubenswrapper[4926]: E1122 10:43:25.082652 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-operators-q4zdr" podUID="1ddd6078-62c2-454b-8a1c-68ea18b0a7ef"
Nov 22 10:43:25 crc kubenswrapper[4926]: I1122 10:43:25.129029 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/network-metrics-daemon-jfbf4" podStartSLOduration=205.129001523 podStartE2EDuration="3m25.129001523s" podCreationTimestamp="2025-11-22 10:40:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 10:43:25.126583004 +0000 UTC m=+225.428188321" watchObservedRunningTime="2025-11-22 10:43:25.129001523 +0000 UTC m=+225.430606840"
Nov 22 10:43:25 crc kubenswrapper[4926]: I1122 10:43:25.599594 4926 patch_prober.go:28] interesting pod/downloads-7954f5f757-8l5sv container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.21:8080/\": dial tcp 10.217.0.21:8080: connect: connection refused" start-of-body=
Nov 22 10:43:25 crc kubenswrapper[4926]: I1122 10:43:25.599656 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-8l5sv" podUID="6e0f47bd-848d-4619-a2f9-eb503d04e2e0" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.21:8080/\": dial tcp 10.217.0.21:8080: connect: connection refused"
Nov 22 10:43:25 crc kubenswrapper[4926]: I1122 10:43:25.600004 4926 patch_prober.go:28] interesting pod/downloads-7954f5f757-8l5sv container/download-server namespace/openshift-console: Liveness probe status=failure output="Get \"http://10.217.0.21:8080/\": dial tcp 10.217.0.21:8080: connect: connection refused" start-of-body=
Nov 22 10:43:25 crc kubenswrapper[4926]: I1122 10:43:25.600091 4926 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console/downloads-7954f5f757-8l5sv" podUID="6e0f47bd-848d-4619-a2f9-eb503d04e2e0" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.21:8080/\": dial tcp 10.217.0.21:8080: connect: connection refused"
Nov 22 10:43:35 crc kubenswrapper[4926]: I1122 10:43:35.620980 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/downloads-7954f5f757-8l5sv"
Nov 22 10:43:45 crc kubenswrapper[4926]: I1122 10:43:45.215232 4926 generic.go:334] "Generic (PLEG): container finished" podID="4d60f8f8-4bda-476b-b339-e29ec2713912" containerID="d36386b206b20b7eed7dba918003bf2e6d7a4234d0b635902ba502d4766ac4c4" exitCode=0
Nov 22 10:43:45 crc kubenswrapper[4926]: I1122 10:43:45.216306 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wxk9m" event={"ID":"4d60f8f8-4bda-476b-b339-e29ec2713912","Type":"ContainerDied","Data":"d36386b206b20b7eed7dba918003bf2e6d7a4234d0b635902ba502d4766ac4c4"}
Nov 22 10:43:45 crc kubenswrapper[4926]: I1122 10:43:45.218264 4926 generic.go:334] "Generic (PLEG): container finished" podID="4ada1ec1-1f41-40d3-a36d-3de32bb19c9c" containerID="d4d51d0c54c4c2eed9f21d4a12e3c215e806a7167ff287fd28a2486f451ac374" exitCode=0
Nov 22 10:43:45 crc kubenswrapper[4926]: I1122 10:43:45.218291 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-s7kfj" event={"ID":"4ada1ec1-1f41-40d3-a36d-3de32bb19c9c","Type":"ContainerDied","Data":"d4d51d0c54c4c2eed9f21d4a12e3c215e806a7167ff287fd28a2486f451ac374"}
Nov 22 10:43:48 crc kubenswrapper[4926]: I1122 10:43:48.235335 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-2vvkj" event={"ID":"f0547d70-c2f4-4d26-ad73-9f65df8b1bc1","Type":"ContainerStarted","Data":"c8360b9755b9412f704f7b0b3960284df7a7738391dc520406ab7149dac3eb14"}
Nov 22 10:43:48 crc kubenswrapper[4926]: I1122 10:43:48.238030 4926 generic.go:334] "Generic (PLEG): container finished" podID="72e5abe7-12f0-4180-9d5c-9d55fe800bc5" containerID="82f8048d84fbcd886fed79b2b3a99886018676e6da3cb50a12678b5d6ad2223e" exitCode=0
Nov 22 10:43:48 crc kubenswrapper[4926]: I1122 10:43:48.238096 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-mcdg4" event={"ID":"72e5abe7-12f0-4180-9d5c-9d55fe800bc5","Type":"ContainerDied","Data":"82f8048d84fbcd886fed79b2b3a99886018676e6da3cb50a12678b5d6ad2223e"}
Nov 22 10:43:48 crc kubenswrapper[4926]: I1122 10:43:48.241976 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-c2r2n" event={"ID":"b8674be0-e053-4d83-9a04-008800542315","Type":"ContainerStarted","Data":"f53a3f4aff921f5ec3762c205fe1bea56fadf89df3a4f1e13781dcac9dcd1de0"}
Nov 22 10:43:48 crc kubenswrapper[4926]: I1122 10:43:48.244751 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-4864b" event={"ID":"c34f3883-c9b2-41fe-9a17-127918e9ef88","Type":"ContainerStarted","Data":"cf7381fdc23a6a053988e57f7b2f55713ce8af059d871b75461cd977fbf4e377"}
Nov 22 10:43:48 crc kubenswrapper[4926]: I1122 10:43:48.255140 4926 generic.go:334] "Generic (PLEG): container finished" podID="8ea24004-1068-4fad-a694-c92251db240d" containerID="8ed11a998a2e548cf89821f9bee25c153847b05a7e90327f94b741f29c415024" exitCode=0
Nov 22 10:43:48 crc kubenswrapper[4926]: I1122 10:43:48.255208 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-x6q97" event={"ID":"8ea24004-1068-4fad-a694-c92251db240d","Type":"ContainerDied","Data":"8ed11a998a2e548cf89821f9bee25c153847b05a7e90327f94b741f29c415024"}
Nov 22 10:43:48 crc kubenswrapper[4926]: I1122 10:43:48.261250 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wxk9m" event={"ID":"4d60f8f8-4bda-476b-b339-e29ec2713912","Type":"ContainerStarted","Data":"f92d85d46dfd567fb9143b6efcdf84645b17902e0749bb7824ecc9a354abb1b6"}
Nov 22 10:43:48 crc kubenswrapper[4926]: I1122 10:43:48.270490 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-s7kfj" event={"ID":"4ada1ec1-1f41-40d3-a36d-3de32bb19c9c","Type":"ContainerStarted","Data":"cd84f58c03359fada0518a37bb543e5f69283a0f74945b2db935ca499bc9852d"}
Nov 22 10:43:48 crc kubenswrapper[4926]: I1122 10:43:48.278703 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-q4zdr" event={"ID":"1ddd6078-62c2-454b-8a1c-68ea18b0a7ef","Type":"ContainerStarted","Data":"4eb845950dd6de5a211ffe4be7fac8f819cec3988c2305df8ab05d8ba18fcbaf"}
Nov 22 10:43:48 crc kubenswrapper[4926]: I1122 10:43:48.354571 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-wxk9m" podStartSLOduration=3.189744903 podStartE2EDuration="1m33.354551168s" podCreationTimestamp="2025-11-22 10:42:15 +0000 UTC" firstStartedPulling="2025-11-22 10:42:17.40138933 +0000 UTC m=+157.702994617" lastFinishedPulling="2025-11-22 10:43:47.566195585 +0000 UTC m=+247.867800882" observedRunningTime="2025-11-22 10:43:48.353044215 +0000 UTC m=+248.654649502" watchObservedRunningTime="2025-11-22 10:43:48.354551168 +0000 UTC m=+248.656156455"
Nov 22 10:43:48 crc kubenswrapper[4926]: I1122 10:43:48.376597 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-s7kfj" podStartSLOduration=2.8707429319999997 podStartE2EDuration="1m36.376577807s" podCreationTimestamp="2025-11-22 10:42:12 +0000 UTC" firstStartedPulling="2025-11-22 10:42:14.08486685 +0000 UTC m=+154.386472137" lastFinishedPulling="2025-11-22 10:43:47.590701725 +0000 UTC m=+247.892307012" observedRunningTime="2025-11-22 10:43:48.375243848 +0000 UTC m=+248.676849135" watchObservedRunningTime="2025-11-22 10:43:48.376577807 +0000 UTC m=+248.678183084"
Nov 22 10:43:49 crc kubenswrapper[4926]: I1122 10:43:49.287285 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-mcdg4" event={"ID":"72e5abe7-12f0-4180-9d5c-9d55fe800bc5","Type":"ContainerStarted","Data":"00f3165ed6a36ddf7f8dfb3af18205b838dc89458f8411c467deb09e4185ca15"}
Nov 22 10:43:49 crc kubenswrapper[4926]: I1122 10:43:49.289289 4926 generic.go:334] "Generic (PLEG): container finished" podID="b8674be0-e053-4d83-9a04-008800542315" containerID="f53a3f4aff921f5ec3762c205fe1bea56fadf89df3a4f1e13781dcac9dcd1de0" exitCode=0
Nov 22 10:43:49 crc kubenswrapper[4926]: I1122 10:43:49.289373 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-c2r2n" event={"ID":"b8674be0-e053-4d83-9a04-008800542315","Type":"ContainerDied","Data":"f53a3f4aff921f5ec3762c205fe1bea56fadf89df3a4f1e13781dcac9dcd1de0"}
Nov 22 10:43:49 crc kubenswrapper[4926]: I1122 10:43:49.291366 4926 generic.go:334] "Generic (PLEG): container finished" podID="c34f3883-c9b2-41fe-9a17-127918e9ef88" containerID="cf7381fdc23a6a053988e57f7b2f55713ce8af059d871b75461cd977fbf4e377" exitCode=0
Nov 22 10:43:49 crc kubenswrapper[4926]: I1122 10:43:49.291464 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-4864b" event={"ID":"c34f3883-c9b2-41fe-9a17-127918e9ef88","Type":"ContainerDied","Data":"cf7381fdc23a6a053988e57f7b2f55713ce8af059d871b75461cd977fbf4e377"}
Nov 22 10:43:49 crc kubenswrapper[4926]: I1122 10:43:49.294398 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-x6q97" event={"ID":"8ea24004-1068-4fad-a694-c92251db240d","Type":"ContainerStarted","Data":"96332c28d2eda4da5c6d901aaf32edcdae793cab0d6da57ee12265e08f63055a"}
Nov 22 10:43:49 crc kubenswrapper[4926]: I1122 10:43:49.296530 4926 generic.go:334] "Generic (PLEG): container finished" podID="1ddd6078-62c2-454b-8a1c-68ea18b0a7ef" containerID="4eb845950dd6de5a211ffe4be7fac8f819cec3988c2305df8ab05d8ba18fcbaf" exitCode=0
Nov 22 10:43:49 crc kubenswrapper[4926]: I1122 10:43:49.296586 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-q4zdr" event={"ID":"1ddd6078-62c2-454b-8a1c-68ea18b0a7ef","Type":"ContainerDied","Data":"4eb845950dd6de5a211ffe4be7fac8f819cec3988c2305df8ab05d8ba18fcbaf"}
Nov 22 10:43:49 crc kubenswrapper[4926]: I1122 10:43:49.301979 4926 generic.go:334] "Generic (PLEG): container finished" podID="f0547d70-c2f4-4d26-ad73-9f65df8b1bc1" containerID="c8360b9755b9412f704f7b0b3960284df7a7738391dc520406ab7149dac3eb14" exitCode=0
Nov 22 10:43:49 crc kubenswrapper[4926]: I1122 10:43:49.302446 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-2vvkj" event={"ID":"f0547d70-c2f4-4d26-ad73-9f65df8b1bc1","Type":"ContainerDied","Data":"c8360b9755b9412f704f7b0b3960284df7a7738391dc520406ab7149dac3eb14"}
Nov 22 10:43:49 crc kubenswrapper[4926]: I1122 10:43:49.355716 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-mcdg4" podStartSLOduration=2.821456366 podStartE2EDuration="1m35.35549689s" podCreationTimestamp="2025-11-22 10:42:14 +0000 UTC" firstStartedPulling="2025-11-22 10:42:16.190287822 +0000 UTC m=+156.491893109" lastFinishedPulling="2025-11-22 10:43:48.724328306 +0000 UTC m=+249.025933633" observedRunningTime="2025-11-22 10:43:49.331235937 +0000 UTC m=+249.632841254" watchObservedRunningTime="2025-11-22 10:43:49.35549689 +0000 UTC m=+249.657102187"
Nov 22 10:43:49 crc kubenswrapper[4926]: I1122 10:43:49.375380 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-x6q97" podStartSLOduration=2.576030634 podStartE2EDuration="1m35.375346547s" podCreationTimestamp="2025-11-22 10:42:14 +0000 UTC" firstStartedPulling="2025-11-22 10:42:16.268599925 +0000 UTC m=+156.570205212" lastFinishedPulling="2025-11-22 10:43:49.067915798 +0000 UTC m=+249.369521125" observedRunningTime="2025-11-22 10:43:49.371214899 +0000 UTC m=+249.672820186" watchObservedRunningTime="2025-11-22 10:43:49.375346547 +0000 UTC m=+249.676951844"
Nov 22 10:43:50 crc kubenswrapper[4926]: I1122 10:43:50.309713 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-q4zdr" event={"ID":"1ddd6078-62c2-454b-8a1c-68ea18b0a7ef","Type":"ContainerStarted","Data":"0611581e39822d9a52e99b2fc3298bee95743321d50cb6edfce9f87e84780ef2"}
Nov 22 10:43:51 crc kubenswrapper[4926]: I1122 10:43:51.315670 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-2vvkj" event={"ID":"f0547d70-c2f4-4d26-ad73-9f65df8b1bc1","Type":"ContainerStarted","Data":"7a39a59f31fcc701b490a766954084b6b54ae99654c2821d925bdca09e201e02"}
Nov 22 10:43:51 crc kubenswrapper[4926]: I1122 10:43:51.319414 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-c2r2n" event={"ID":"b8674be0-e053-4d83-9a04-008800542315","Type":"ContainerStarted","Data":"b59885bcd182306568e87c4aa4abe4a394e491a08fa41e867b4eebd10aecc93c"}
Nov 22 10:43:51 crc kubenswrapper[4926]: I1122 10:43:51.322138 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-4864b" event={"ID":"c34f3883-c9b2-41fe-9a17-127918e9ef88","Type":"ContainerStarted","Data":"393f64ed1749d16d6219d2bf39288c10d920e6814c6f4c0495b98a6c96e43a9e"}
Nov 22 10:43:51 crc kubenswrapper[4926]: I1122 10:43:51.361515 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-2vvkj" podStartSLOduration=2.59717356 podStartE2EDuration="1m39.361496615s" podCreationTimestamp="2025-11-22 10:42:12 +0000 UTC" firstStartedPulling="2025-11-22 10:42:14.08802245 +0000 UTC m=+154.389627747" lastFinishedPulling="2025-11-22 10:43:50.852345515 +0000 UTC m=+251.153950802" observedRunningTime="2025-11-22 10:43:51.338080586 +0000 UTC m=+251.639685873" watchObservedRunningTime="2025-11-22 10:43:51.361496615 +0000 UTC m=+251.663101902"
Nov 22 10:43:51 crc kubenswrapper[4926]: I1122 10:43:51.362004 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-c2r2n" podStartSLOduration=2.7695452879999998 podStartE2EDuration="1m39.361999349s" podCreationTimestamp="2025-11-22 10:42:12 +0000 UTC" firstStartedPulling="2025-11-22 10:42:14.157294034 +0000 UTC m=+154.458899321" lastFinishedPulling="2025-11-22 10:43:50.749748095 +0000 UTC m=+251.051353382" observedRunningTime="2025-11-22 10:43:51.360847036 +0000 UTC m=+251.662452333" watchObservedRunningTime="2025-11-22 10:43:51.361999349 +0000 UTC m=+251.663604636"
Nov 22 10:43:51 crc kubenswrapper[4926]: I1122 10:43:51.387984 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-q4zdr" podStartSLOduration=4.166379976 podStartE2EDuration="1m36.387970611s" podCreationTimestamp="2025-11-22 10:42:15 +0000 UTC" firstStartedPulling="2025-11-22 10:42:17.443193147 +0000 UTC m=+157.744798434" lastFinishedPulling="2025-11-22 10:43:49.664783782 +0000 UTC m=+249.966389069" observedRunningTime="2025-11-22 10:43:51.387086715 +0000 UTC m=+251.688692022" watchObservedRunningTime="2025-11-22 10:43:51.387970611 +0000 UTC m=+251.689575898"
Nov 22 10:43:51 crc kubenswrapper[4926]: I1122 10:43:51.406255 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-4864b" podStartSLOduration=2.67843286 podStartE2EDuration="1m39.406239742s" podCreationTimestamp="2025-11-22 10:42:12 +0000 UTC" firstStartedPulling="2025-11-22 10:42:14.080644409 +0000 UTC m=+154.382249696" lastFinishedPulling="2025-11-22 10:43:50.808451291 +0000 UTC m=+251.110056578" observedRunningTime="2025-11-22 10:43:51.405293045 +0000 UTC m=+251.706898322" watchObservedRunningTime="2025-11-22 10:43:51.406239742 +0000 UTC m=+251.707845029"
Nov 22 10:43:52 crc kubenswrapper[4926]: I1122 10:43:52.540640 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-4864b"
Nov 22 10:43:52 crc kubenswrapper[4926]: I1122 10:43:52.540928 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-4864b"
Nov 22 10:43:52 crc kubenswrapper[4926]: I1122 10:43:52.767163 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-c2r2n"
Nov 22 10:43:52 crc kubenswrapper[4926]: I1122 10:43:52.768027 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-c2r2n"
Nov 22 10:43:52 crc kubenswrapper[4926]: I1122 10:43:52.814832 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-c2r2n"
Nov 22 10:43:52 crc kubenswrapper[4926]: I1122 10:43:52.930182 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-s7kfj"
Nov 22 10:43:52 crc kubenswrapper[4926]: I1122 10:43:52.930473 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-s7kfj"
Nov 22 10:43:52 crc kubenswrapper[4926]: I1122 10:43:52.980942 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-s7kfj"
Nov 22 10:43:53 crc kubenswrapper[4926]: I1122 10:43:53.157624 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-2vvkj"
Nov 22 10:43:53 crc kubenswrapper[4926]: I1122 10:43:53.157675 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-2vvkj"
Nov 22 10:43:53 crc kubenswrapper[4926]: I1122 10:43:53.223388 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-2vvkj"
Nov 22 10:43:53 crc kubenswrapper[4926]: I1122 10:43:53.388674 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-s7kfj"
Nov 22 10:43:53 crc kubenswrapper[4926]: I1122 10:43:53.681441 4926 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/community-operators-4864b" podUID="c34f3883-c9b2-41fe-9a17-127918e9ef88" containerName="registry-server" probeResult="failure" output=<
Nov 22 10:43:53 crc kubenswrapper[4926]: timeout: failed to connect service ":50051" within 1s
Nov 22 10:43:53 crc kubenswrapper[4926]: >
Nov 22 10:43:54 crc kubenswrapper[4926]: I1122 10:43:54.726566 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-x6q97"
Nov 22 10:43:54 crc kubenswrapper[4926]: I1122 10:43:54.727123 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-x6q97"
Nov 22 10:43:54 crc kubenswrapper[4926]: I1122 10:43:54.764704 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-x6q97"
Nov 22 10:43:55 crc kubenswrapper[4926]: I1122 10:43:55.143847 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-mcdg4"
Nov 22 10:43:55 crc kubenswrapper[4926]: I1122 10:43:55.144679 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-mcdg4"
Nov 22 10:43:55 crc kubenswrapper[4926]: I1122 10:43:55.194915 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-mcdg4"
Nov 22 10:43:55 crc kubenswrapper[4926]: I1122 10:43:55.385064 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-mcdg4"
Nov 22 10:43:55 crc kubenswrapper[4926]: I1122 10:43:55.389451 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-x6q97"
Nov 22 10:43:55 crc kubenswrapper[4926]: I1122 10:43:55.539117 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-g6m6g"]
Nov 22 10:43:55 crc kubenswrapper[4926]: I1122 10:43:55.785691 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-wxk9m"
Nov 22 10:43:55 crc kubenswrapper[4926]: I1122 10:43:55.785738 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-wxk9m"
Nov 22 10:43:55 crc kubenswrapper[4926]: I1122 10:43:55.828559 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-wxk9m"
Nov 22 10:43:56 crc kubenswrapper[4926]: I1122 10:43:56.244728 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-q4zdr"
Nov 22 10:43:56 crc kubenswrapper[4926]: I1122 10:43:56.244783 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-q4zdr"
Nov 22 10:43:56 crc kubenswrapper[4926]: I1122 10:43:56.391917 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-wxk9m"
Nov 22 10:43:56 crc kubenswrapper[4926]: I1122 10:43:56.445541 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-s7kfj"]
Nov 22 10:43:56 crc kubenswrapper[4926]: I1122 10:43:56.445821 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-s7kfj" podUID="4ada1ec1-1f41-40d3-a36d-3de32bb19c9c" containerName="registry-server" containerID="cri-o://cd84f58c03359fada0518a37bb543e5f69283a0f74945b2db935ca499bc9852d" gracePeriod=2
Nov 22 10:43:57 crc kubenswrapper[4926]: I1122 10:43:57.282234 4926 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-q4zdr" podUID="1ddd6078-62c2-454b-8a1c-68ea18b0a7ef" containerName="registry-server" probeResult="failure" output=<
Nov 22 10:43:57 crc kubenswrapper[4926]: timeout: failed to connect service ":50051" within 1s
Nov 22 10:43:57 crc kubenswrapper[4926]: >
Nov 22 10:43:57 crc kubenswrapper[4926]: I1122 10:43:57.355252 4926 generic.go:334] "Generic (PLEG): container finished" podID="4ada1ec1-1f41-40d3-a36d-3de32bb19c9c" containerID="cd84f58c03359fada0518a37bb543e5f69283a0f74945b2db935ca499bc9852d" exitCode=0
Nov 22 10:43:57 crc kubenswrapper[4926]: I1122 10:43:57.355330 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-s7kfj" event={"ID":"4ada1ec1-1f41-40d3-a36d-3de32bb19c9c","Type":"ContainerDied","Data":"cd84f58c03359fada0518a37bb543e5f69283a0f74945b2db935ca499bc9852d"}
Nov 22 10:43:58 crc kubenswrapper[4926]: I1122 10:43:58.110914 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-s7kfj"
Nov 22 10:43:58 crc kubenswrapper[4926]: I1122 10:43:58.246613 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4ada1ec1-1f41-40d3-a36d-3de32bb19c9c-utilities\") pod \"4ada1ec1-1f41-40d3-a36d-3de32bb19c9c\" (UID: \"4ada1ec1-1f41-40d3-a36d-3de32bb19c9c\") "
Nov 22 10:43:58 crc kubenswrapper[4926]: I1122 10:43:58.246708 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-n9sjl\" (UniqueName: \"kubernetes.io/projected/4ada1ec1-1f41-40d3-a36d-3de32bb19c9c-kube-api-access-n9sjl\") pod \"4ada1ec1-1f41-40d3-a36d-3de32bb19c9c\" (UID: \"4ada1ec1-1f41-40d3-a36d-3de32bb19c9c\") "
Nov 22 10:43:58 crc kubenswrapper[4926]: I1122 10:43:58.246794 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4ada1ec1-1f41-40d3-a36d-3de32bb19c9c-catalog-content\") pod \"4ada1ec1-1f41-40d3-a36d-3de32bb19c9c\" (UID: \"4ada1ec1-1f41-40d3-a36d-3de32bb19c9c\") "
Nov 22 10:43:58 crc kubenswrapper[4926]: I1122 10:43:58.248415 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4ada1ec1-1f41-40d3-a36d-3de32bb19c9c-utilities" (OuterVolumeSpecName: "utilities") pod "4ada1ec1-1f41-40d3-a36d-3de32bb19c9c" (UID: "4ada1ec1-1f41-40d3-a36d-3de32bb19c9c"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 22 10:43:58 crc kubenswrapper[4926]: I1122 10:43:58.252747 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4ada1ec1-1f41-40d3-a36d-3de32bb19c9c-kube-api-access-n9sjl" (OuterVolumeSpecName: "kube-api-access-n9sjl") pod "4ada1ec1-1f41-40d3-a36d-3de32bb19c9c" (UID: "4ada1ec1-1f41-40d3-a36d-3de32bb19c9c"). InnerVolumeSpecName "kube-api-access-n9sjl". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 22 10:43:58 crc kubenswrapper[4926]: I1122 10:43:58.299856 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4ada1ec1-1f41-40d3-a36d-3de32bb19c9c-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "4ada1ec1-1f41-40d3-a36d-3de32bb19c9c" (UID: "4ada1ec1-1f41-40d3-a36d-3de32bb19c9c"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 22 10:43:58 crc kubenswrapper[4926]: I1122 10:43:58.348386 4926 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4ada1ec1-1f41-40d3-a36d-3de32bb19c9c-utilities\") on node \"crc\" DevicePath \"\""
Nov 22 10:43:58 crc kubenswrapper[4926]: I1122 10:43:58.348431 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-n9sjl\" (UniqueName: \"kubernetes.io/projected/4ada1ec1-1f41-40d3-a36d-3de32bb19c9c-kube-api-access-n9sjl\") on node \"crc\" DevicePath \"\""
Nov 22 10:43:58 crc kubenswrapper[4926]: I1122 10:43:58.348444 4926 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4ada1ec1-1f41-40d3-a36d-3de32bb19c9c-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 22 10:43:58 crc kubenswrapper[4926]: I1122 10:43:58.363131 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-s7kfj" event={"ID":"4ada1ec1-1f41-40d3-a36d-3de32bb19c9c","Type":"ContainerDied","Data":"ff671ab63b3078d6f7c97c946445b21ace2b55f9420a8b6ce9384161f3842bd6"}
Nov 22 10:43:58 crc kubenswrapper[4926]: I1122 10:43:58.363187 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-s7kfj"
Nov 22 10:43:58 crc kubenswrapper[4926]: I1122 10:43:58.363193 4926 scope.go:117] "RemoveContainer" containerID="cd84f58c03359fada0518a37bb543e5f69283a0f74945b2db935ca499bc9852d"
Nov 22 10:43:58 crc kubenswrapper[4926]: I1122 10:43:58.385905 4926 scope.go:117] "RemoveContainer" containerID="d4d51d0c54c4c2eed9f21d4a12e3c215e806a7167ff287fd28a2486f451ac374"
Nov 22 10:43:58 crc kubenswrapper[4926]: I1122 10:43:58.402019 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-s7kfj"]
Nov 22 10:43:58 crc kubenswrapper[4926]: I1122 10:43:58.403166 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-s7kfj"]
Nov 22 10:43:58 crc kubenswrapper[4926]: I1122 10:43:58.405900 4926 scope.go:117] "RemoveContainer" containerID="10b1cc662a876506bc6144f9bd713983225c8537e26d8c8d52af72e95c22ba6f"
Nov 22 10:43:58 crc kubenswrapper[4926]: I1122 10:43:58.588042 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4ada1ec1-1f41-40d3-a36d-3de32bb19c9c" path="/var/lib/kubelet/pods/4ada1ec1-1f41-40d3-a36d-3de32bb19c9c/volumes"
Nov 22 10:43:58 crc kubenswrapper[4926]: I1122 10:43:58.847052 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-mcdg4"]
Nov 22 10:43:58 crc kubenswrapper[4926]: I1122 10:43:58.847298 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-mcdg4" podUID="72e5abe7-12f0-4180-9d5c-9d55fe800bc5" containerName="registry-server" containerID="cri-o://00f3165ed6a36ddf7f8dfb3af18205b838dc89458f8411c467deb09e4185ca15" gracePeriod=2
Nov 22 10:44:00 crc kubenswrapper[4926]: I1122 10:44:00.393382 4926 generic.go:334] "Generic (PLEG): container finished" podID="72e5abe7-12f0-4180-9d5c-9d55fe800bc5" containerID="00f3165ed6a36ddf7f8dfb3af18205b838dc89458f8411c467deb09e4185ca15" exitCode=0
Nov 22 10:44:00 crc kubenswrapper[4926]: I1122 10:44:00.393534 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-mcdg4" event={"ID":"72e5abe7-12f0-4180-9d5c-9d55fe800bc5","Type":"ContainerDied","Data":"00f3165ed6a36ddf7f8dfb3af18205b838dc89458f8411c467deb09e4185ca15"}
Nov 22 10:44:00 crc kubenswrapper[4926]: I1122 10:44:00.508191 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-mcdg4"
Nov 22 10:44:00 crc kubenswrapper[4926]: I1122 10:44:00.574327 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/72e5abe7-12f0-4180-9d5c-9d55fe800bc5-utilities\") pod \"72e5abe7-12f0-4180-9d5c-9d55fe800bc5\" (UID: \"72e5abe7-12f0-4180-9d5c-9d55fe800bc5\") "
Nov 22 10:44:00 crc kubenswrapper[4926]: I1122 10:44:00.574425 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9f557\" (UniqueName: \"kubernetes.io/projected/72e5abe7-12f0-4180-9d5c-9d55fe800bc5-kube-api-access-9f557\") pod \"72e5abe7-12f0-4180-9d5c-9d55fe800bc5\" (UID: \"72e5abe7-12f0-4180-9d5c-9d55fe800bc5\") "
Nov 22 10:44:00 crc kubenswrapper[4926]: I1122 10:44:00.574490 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/72e5abe7-12f0-4180-9d5c-9d55fe800bc5-catalog-content\") pod \"72e5abe7-12f0-4180-9d5c-9d55fe800bc5\" (UID: \"72e5abe7-12f0-4180-9d5c-9d55fe800bc5\") "
Nov 22 10:44:00 crc kubenswrapper[4926]: I1122 10:44:00.575320 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/72e5abe7-12f0-4180-9d5c-9d55fe800bc5-utilities" (OuterVolumeSpecName: "utilities") pod "72e5abe7-12f0-4180-9d5c-9d55fe800bc5" (UID: "72e5abe7-12f0-4180-9d5c-9d55fe800bc5"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 22 10:44:00 crc kubenswrapper[4926]: I1122 10:44:00.606775 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/72e5abe7-12f0-4180-9d5c-9d55fe800bc5-kube-api-access-9f557" (OuterVolumeSpecName: "kube-api-access-9f557") pod "72e5abe7-12f0-4180-9d5c-9d55fe800bc5" (UID: "72e5abe7-12f0-4180-9d5c-9d55fe800bc5"). InnerVolumeSpecName "kube-api-access-9f557". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 22 10:44:00 crc kubenswrapper[4926]: I1122 10:44:00.676351 4926 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/72e5abe7-12f0-4180-9d5c-9d55fe800bc5-utilities\") on node \"crc\" DevicePath \"\""
Nov 22 10:44:00 crc kubenswrapper[4926]: I1122 10:44:00.676402 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9f557\" (UniqueName: \"kubernetes.io/projected/72e5abe7-12f0-4180-9d5c-9d55fe800bc5-kube-api-access-9f557\") on node \"crc\" DevicePath \"\""
Nov 22 10:44:00 crc kubenswrapper[4926]: I1122 10:44:00.714833 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/72e5abe7-12f0-4180-9d5c-9d55fe800bc5-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "72e5abe7-12f0-4180-9d5c-9d55fe800bc5" (UID: "72e5abe7-12f0-4180-9d5c-9d55fe800bc5"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 22 10:44:00 crc kubenswrapper[4926]: I1122 10:44:00.777149 4926 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/72e5abe7-12f0-4180-9d5c-9d55fe800bc5-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 22 10:44:01 crc kubenswrapper[4926]: I1122 10:44:01.401095 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-mcdg4" event={"ID":"72e5abe7-12f0-4180-9d5c-9d55fe800bc5","Type":"ContainerDied","Data":"7b3583df78b38abf491dbef737567b4bb4ed575f4695ddad396222cc002edc32"}
Nov 22 10:44:01 crc kubenswrapper[4926]: I1122 10:44:01.401145 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-mcdg4"
Nov 22 10:44:01 crc kubenswrapper[4926]: I1122 10:44:01.401553 4926 scope.go:117] "RemoveContainer" containerID="00f3165ed6a36ddf7f8dfb3af18205b838dc89458f8411c467deb09e4185ca15"
Nov 22 10:44:01 crc kubenswrapper[4926]: I1122 10:44:01.426539 4926 scope.go:117] "RemoveContainer" containerID="82f8048d84fbcd886fed79b2b3a99886018676e6da3cb50a12678b5d6ad2223e"
Nov 22 10:44:01 crc kubenswrapper[4926]: I1122 10:44:01.430226 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-mcdg4"]
Nov 22 10:44:01 crc kubenswrapper[4926]: I1122 10:44:01.433938 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-mcdg4"]
Nov 22 10:44:01 crc kubenswrapper[4926]: I1122 10:44:01.456714 4926 scope.go:117] "RemoveContainer" containerID="437a77cb04006d727d4132dc98dae799930c277e416c9f4bc3f6ca44c97143cb"
Nov 22 10:44:02 crc kubenswrapper[4926]: I1122 10:44:02.597685 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="72e5abe7-12f0-4180-9d5c-9d55fe800bc5" path="/var/lib/kubelet/pods/72e5abe7-12f0-4180-9d5c-9d55fe800bc5/volumes"
Nov 22 10:44:02 crc kubenswrapper[4926]: I1122 10:44:02.599023 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-4864b"
Nov 22 10:44:02 crc kubenswrapper[4926]: I1122 10:44:02.647042 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-4864b"
Nov 22 10:44:02 crc kubenswrapper[4926]: I1122 10:44:02.803516 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-c2r2n"
Nov 22 10:44:03 crc kubenswrapper[4926]: I1122 10:44:03.197976 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-2vvkj"
Nov 22 10:44:06 crc kubenswrapper[4926]: I1122 10:44:06.278614 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-q4zdr"
Nov 22 10:44:06 crc kubenswrapper[4926]: I1122 10:44:06.314717 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-q4zdr"
Nov 22 10:44:06 crc kubenswrapper[4926]: I1122 10:44:06.647994 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-2vvkj"]
Nov 22 10:44:06 crc kubenswrapper[4926]: I1122 10:44:06.648238 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-2vvkj" podUID="f0547d70-c2f4-4d26-ad73-9f65df8b1bc1" containerName="registry-server" containerID="cri-o://7a39a59f31fcc701b490a766954084b6b54ae99654c2821d925bdca09e201e02" gracePeriod=2
Nov 22 10:44:06 crc kubenswrapper[4926]: I1122 10:44:06.995924 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-2vvkj"
Nov 22 10:44:07 crc kubenswrapper[4926]: I1122 10:44:07.153312 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vm4gs\" (UniqueName: \"kubernetes.io/projected/f0547d70-c2f4-4d26-ad73-9f65df8b1bc1-kube-api-access-vm4gs\") pod \"f0547d70-c2f4-4d26-ad73-9f65df8b1bc1\" (UID: \"f0547d70-c2f4-4d26-ad73-9f65df8b1bc1\") "
Nov 22 10:44:07 crc kubenswrapper[4926]: I1122 10:44:07.153373 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f0547d70-c2f4-4d26-ad73-9f65df8b1bc1-catalog-content\") pod \"f0547d70-c2f4-4d26-ad73-9f65df8b1bc1\" (UID: \"f0547d70-c2f4-4d26-ad73-9f65df8b1bc1\") "
Nov 22 10:44:07 crc kubenswrapper[4926]: I1122 10:44:07.153456 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f0547d70-c2f4-4d26-ad73-9f65df8b1bc1-utilities\") pod \"f0547d70-c2f4-4d26-ad73-9f65df8b1bc1\" (UID: \"f0547d70-c2f4-4d26-ad73-9f65df8b1bc1\") "
Nov 22 10:44:07 crc kubenswrapper[4926]: I1122 10:44:07.154280 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f0547d70-c2f4-4d26-ad73-9f65df8b1bc1-utilities" (OuterVolumeSpecName: "utilities") pod "f0547d70-c2f4-4d26-ad73-9f65df8b1bc1" (UID: "f0547d70-c2f4-4d26-ad73-9f65df8b1bc1"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 22 10:44:07 crc kubenswrapper[4926]: I1122 10:44:07.159826 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f0547d70-c2f4-4d26-ad73-9f65df8b1bc1-kube-api-access-vm4gs" (OuterVolumeSpecName: "kube-api-access-vm4gs") pod "f0547d70-c2f4-4d26-ad73-9f65df8b1bc1" (UID: "f0547d70-c2f4-4d26-ad73-9f65df8b1bc1"). InnerVolumeSpecName "kube-api-access-vm4gs". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 22 10:44:07 crc kubenswrapper[4926]: I1122 10:44:07.203867 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f0547d70-c2f4-4d26-ad73-9f65df8b1bc1-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "f0547d70-c2f4-4d26-ad73-9f65df8b1bc1" (UID: "f0547d70-c2f4-4d26-ad73-9f65df8b1bc1"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 22 10:44:07 crc kubenswrapper[4926]: I1122 10:44:07.255136 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vm4gs\" (UniqueName: \"kubernetes.io/projected/f0547d70-c2f4-4d26-ad73-9f65df8b1bc1-kube-api-access-vm4gs\") on node \"crc\" DevicePath \"\""
Nov 22 10:44:07 crc kubenswrapper[4926]: I1122 10:44:07.255169 4926 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f0547d70-c2f4-4d26-ad73-9f65df8b1bc1-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 22 10:44:07 crc kubenswrapper[4926]: I1122 10:44:07.255178 4926 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f0547d70-c2f4-4d26-ad73-9f65df8b1bc1-utilities\") on node \"crc\" DevicePath \"\""
Nov 22 10:44:07 crc kubenswrapper[4926]: I1122 10:44:07.430667 4926 generic.go:334] "Generic (PLEG): container finished" podID="f0547d70-c2f4-4d26-ad73-9f65df8b1bc1" containerID="7a39a59f31fcc701b490a766954084b6b54ae99654c2821d925bdca09e201e02" exitCode=0
Nov 22 10:44:07 crc kubenswrapper[4926]: I1122 10:44:07.430708 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-2vvkj" event={"ID":"f0547d70-c2f4-4d26-ad73-9f65df8b1bc1","Type":"ContainerDied","Data":"7a39a59f31fcc701b490a766954084b6b54ae99654c2821d925bdca09e201e02"}
Nov 22 10:44:07 crc kubenswrapper[4926]: I1122 10:44:07.430759 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-2vvkj" event={"ID":"f0547d70-c2f4-4d26-ad73-9f65df8b1bc1","Type":"ContainerDied","Data":"66eda2fe6f592d117117b9ab5c56c7007315b5399036fb216863019e97e3db11"}
Nov 22 10:44:07 crc kubenswrapper[4926]: I1122 10:44:07.430779 4926 scope.go:117] "RemoveContainer" containerID="7a39a59f31fcc701b490a766954084b6b54ae99654c2821d925bdca09e201e02"
Nov 22 10:44:07 crc kubenswrapper[4926]: I1122 10:44:07.430785 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-2vvkj"
Nov 22 10:44:07 crc kubenswrapper[4926]: I1122 10:44:07.451522 4926 scope.go:117] "RemoveContainer" containerID="c8360b9755b9412f704f7b0b3960284df7a7738391dc520406ab7149dac3eb14"
Nov 22 10:44:07 crc kubenswrapper[4926]: I1122 10:44:07.459650 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-2vvkj"]
Nov 22 10:44:07 crc kubenswrapper[4926]: I1122 10:44:07.464550 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-2vvkj"]
Nov 22 10:44:07 crc kubenswrapper[4926]: I1122 10:44:07.476389 4926 scope.go:117] "RemoveContainer" containerID="3d78e1e93471eebfb2e0e2a852fc44f821e4325c33d30acbe56dbed05f33ccf9"
Nov 22 10:44:07 crc kubenswrapper[4926]: I1122 10:44:07.493742 4926 scope.go:117] "RemoveContainer" containerID="7a39a59f31fcc701b490a766954084b6b54ae99654c2821d925bdca09e201e02"
Nov 22 10:44:07 crc kubenswrapper[4926]: E1122 10:44:07.494269 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7a39a59f31fcc701b490a766954084b6b54ae99654c2821d925bdca09e201e02\": container with ID starting with 7a39a59f31fcc701b490a766954084b6b54ae99654c2821d925bdca09e201e02 not found: ID does not exist" containerID="7a39a59f31fcc701b490a766954084b6b54ae99654c2821d925bdca09e201e02"
Nov 22 10:44:07 crc kubenswrapper[4926]: I1122 10:44:07.494304 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7a39a59f31fcc701b490a766954084b6b54ae99654c2821d925bdca09e201e02"} err="failed to get container status \"7a39a59f31fcc701b490a766954084b6b54ae99654c2821d925bdca09e201e02\": rpc error: code = NotFound desc = could not find container \"7a39a59f31fcc701b490a766954084b6b54ae99654c2821d925bdca09e201e02\": container with ID starting with 7a39a59f31fcc701b490a766954084b6b54ae99654c2821d925bdca09e201e02 not found: ID does not exist"
Nov 22 10:44:07 crc kubenswrapper[4926]: I1122 10:44:07.494332 4926 scope.go:117] "RemoveContainer" containerID="c8360b9755b9412f704f7b0b3960284df7a7738391dc520406ab7149dac3eb14"
Nov 22 10:44:07 crc kubenswrapper[4926]: E1122 10:44:07.494583 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c8360b9755b9412f704f7b0b3960284df7a7738391dc520406ab7149dac3eb14\": container with ID starting with c8360b9755b9412f704f7b0b3960284df7a7738391dc520406ab7149dac3eb14 not found: ID does not exist" containerID="c8360b9755b9412f704f7b0b3960284df7a7738391dc520406ab7149dac3eb14"
Nov 22 10:44:07 crc kubenswrapper[4926]: I1122 10:44:07.494607 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c8360b9755b9412f704f7b0b3960284df7a7738391dc520406ab7149dac3eb14"} err="failed to get container status \"c8360b9755b9412f704f7b0b3960284df7a7738391dc520406ab7149dac3eb14\": rpc error: code = NotFound desc = could not find container \"c8360b9755b9412f704f7b0b3960284df7a7738391dc520406ab7149dac3eb14\": container with ID starting with c8360b9755b9412f704f7b0b3960284df7a7738391dc520406ab7149dac3eb14 not found: ID does not exist"
Nov 22 10:44:07 crc kubenswrapper[4926]: I1122 10:44:07.494625 4926 scope.go:117] "RemoveContainer" containerID="3d78e1e93471eebfb2e0e2a852fc44f821e4325c33d30acbe56dbed05f33ccf9"
Nov 22 10:44:07 crc kubenswrapper[4926]: E1122 10:44:07.494842 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3d78e1e93471eebfb2e0e2a852fc44f821e4325c33d30acbe56dbed05f33ccf9\": container with ID starting with 3d78e1e93471eebfb2e0e2a852fc44f821e4325c33d30acbe56dbed05f33ccf9 not found: ID does not exist" containerID="3d78e1e93471eebfb2e0e2a852fc44f821e4325c33d30acbe56dbed05f33ccf9"
Nov 22 10:44:07 crc kubenswrapper[4926]: I1122 10:44:07.494868 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3d78e1e93471eebfb2e0e2a852fc44f821e4325c33d30acbe56dbed05f33ccf9"} err="failed to get container status \"3d78e1e93471eebfb2e0e2a852fc44f821e4325c33d30acbe56dbed05f33ccf9\": rpc error: code = NotFound desc = could not find container \"3d78e1e93471eebfb2e0e2a852fc44f821e4325c33d30acbe56dbed05f33ccf9\": container with ID starting with 3d78e1e93471eebfb2e0e2a852fc44f821e4325c33d30acbe56dbed05f33ccf9 not found: ID does not exist"
Nov 22 10:44:08 crc kubenswrapper[4926]: I1122 10:44:08.587277 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f0547d70-c2f4-4d26-ad73-9f65df8b1bc1" path="/var/lib/kubelet/pods/f0547d70-c2f4-4d26-ad73-9f65df8b1bc1/volumes"
Nov 22 10:44:09 crc kubenswrapper[4926]: I1122 10:44:09.045121 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-q4zdr"]
Nov 22 10:44:09 crc kubenswrapper[4926]: I1122 10:44:09.045352 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-q4zdr" podUID="1ddd6078-62c2-454b-8a1c-68ea18b0a7ef" containerName="registry-server" containerID="cri-o://0611581e39822d9a52e99b2fc3298bee95743321d50cb6edfce9f87e84780ef2" gracePeriod=2
Nov 22 10:44:09 crc kubenswrapper[4926]: I1122 10:44:09.434409 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-q4zdr"
Nov 22 10:44:09 crc kubenswrapper[4926]: I1122 10:44:09.444024 4926 generic.go:334] "Generic (PLEG): container finished" podID="1ddd6078-62c2-454b-8a1c-68ea18b0a7ef" containerID="0611581e39822d9a52e99b2fc3298bee95743321d50cb6edfce9f87e84780ef2" exitCode=0
Nov 22 10:44:09 crc kubenswrapper[4926]: I1122 10:44:09.444074 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-q4zdr" event={"ID":"1ddd6078-62c2-454b-8a1c-68ea18b0a7ef","Type":"ContainerDied","Data":"0611581e39822d9a52e99b2fc3298bee95743321d50cb6edfce9f87e84780ef2"}
Nov 22 10:44:09 crc kubenswrapper[4926]: I1122 10:44:09.444105 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-q4zdr" event={"ID":"1ddd6078-62c2-454b-8a1c-68ea18b0a7ef","Type":"ContainerDied","Data":"e8f57f9e6cd789fc91d8904c14dc5491420480e03935accd1a09ffc7ef6c32cf"}
Nov 22 10:44:09 crc kubenswrapper[4926]: I1122 10:44:09.444126 4926 scope.go:117] "RemoveContainer" containerID="0611581e39822d9a52e99b2fc3298bee95743321d50cb6edfce9f87e84780ef2"
Nov 22 10:44:09 crc kubenswrapper[4926]: I1122 10:44:09.444240 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-q4zdr"
Nov 22 10:44:09 crc kubenswrapper[4926]: I1122 10:44:09.460254 4926 scope.go:117] "RemoveContainer" containerID="4eb845950dd6de5a211ffe4be7fac8f819cec3988c2305df8ab05d8ba18fcbaf"
Nov 22 10:44:09 crc kubenswrapper[4926]: I1122 10:44:09.481992 4926 scope.go:117] "RemoveContainer" containerID="54f14d7f7bc8aab71b973bcfd4f9453e59e9c9c49f86c5ce693bb23f81cca8cb"
Nov 22 10:44:09 crc kubenswrapper[4926]: I1122 10:44:09.502914 4926 scope.go:117] "RemoveContainer" containerID="0611581e39822d9a52e99b2fc3298bee95743321d50cb6edfce9f87e84780ef2"
Nov 22 10:44:09 crc kubenswrapper[4926]: E1122 10:44:09.504896 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0611581e39822d9a52e99b2fc3298bee95743321d50cb6edfce9f87e84780ef2\": container with ID starting with 0611581e39822d9a52e99b2fc3298bee95743321d50cb6edfce9f87e84780ef2 not found: ID does not exist" containerID="0611581e39822d9a52e99b2fc3298bee95743321d50cb6edfce9f87e84780ef2"
Nov 22 10:44:09 crc kubenswrapper[4926]: I1122 10:44:09.504938 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0611581e39822d9a52e99b2fc3298bee95743321d50cb6edfce9f87e84780ef2"} err="failed to get container status \"0611581e39822d9a52e99b2fc3298bee95743321d50cb6edfce9f87e84780ef2\": rpc error: code = NotFound desc = could not find container \"0611581e39822d9a52e99b2fc3298bee95743321d50cb6edfce9f87e84780ef2\": container with ID starting with 0611581e39822d9a52e99b2fc3298bee95743321d50cb6edfce9f87e84780ef2 not found: ID does not exist"
Nov 22 10:44:09 crc kubenswrapper[4926]: I1122 10:44:09.504967 4926 scope.go:117] "RemoveContainer" containerID="4eb845950dd6de5a211ffe4be7fac8f819cec3988c2305df8ab05d8ba18fcbaf"
Nov 22 10:44:09 crc kubenswrapper[4926]: E1122 10:44:09.505278 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4eb845950dd6de5a211ffe4be7fac8f819cec3988c2305df8ab05d8ba18fcbaf\": container with ID starting with 4eb845950dd6de5a211ffe4be7fac8f819cec3988c2305df8ab05d8ba18fcbaf not found: ID does not exist" containerID="4eb845950dd6de5a211ffe4be7fac8f819cec3988c2305df8ab05d8ba18fcbaf"
Nov 22 10:44:09 crc kubenswrapper[4926]: I1122 10:44:09.505304 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4eb845950dd6de5a211ffe4be7fac8f819cec3988c2305df8ab05d8ba18fcbaf"} err="failed to get container status \"4eb845950dd6de5a211ffe4be7fac8f819cec3988c2305df8ab05d8ba18fcbaf\": rpc error: code = NotFound desc = could not find container \"4eb845950dd6de5a211ffe4be7fac8f819cec3988c2305df8ab05d8ba18fcbaf\": container with ID starting with 4eb845950dd6de5a211ffe4be7fac8f819cec3988c2305df8ab05d8ba18fcbaf not found: ID does not exist"
Nov 22 10:44:09 crc kubenswrapper[4926]: I1122 10:44:09.505322 4926 scope.go:117] "RemoveContainer" containerID="54f14d7f7bc8aab71b973bcfd4f9453e59e9c9c49f86c5ce693bb23f81cca8cb"
Nov 22 10:44:09 crc kubenswrapper[4926]: E1122 10:44:09.505573 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"54f14d7f7bc8aab71b973bcfd4f9453e59e9c9c49f86c5ce693bb23f81cca8cb\": container with ID starting with 54f14d7f7bc8aab71b973bcfd4f9453e59e9c9c49f86c5ce693bb23f81cca8cb not found: ID does not exist" containerID="54f14d7f7bc8aab71b973bcfd4f9453e59e9c9c49f86c5ce693bb23f81cca8cb"
Nov 22 10:44:09 crc kubenswrapper[4926]: I1122 10:44:09.505593 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"54f14d7f7bc8aab71b973bcfd4f9453e59e9c9c49f86c5ce693bb23f81cca8cb"} err="failed to get container status \"54f14d7f7bc8aab71b973bcfd4f9453e59e9c9c49f86c5ce693bb23f81cca8cb\": rpc error: code = NotFound desc = could not find container \"54f14d7f7bc8aab71b973bcfd4f9453e59e9c9c49f86c5ce693bb23f81cca8cb\": container with ID starting with 54f14d7f7bc8aab71b973bcfd4f9453e59e9c9c49f86c5ce693bb23f81cca8cb not found: ID does not exist"
Nov 22 10:44:09 crc kubenswrapper[4926]: I1122 10:44:09.590968 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1ddd6078-62c2-454b-8a1c-68ea18b0a7ef-utilities\") pod \"1ddd6078-62c2-454b-8a1c-68ea18b0a7ef\" (UID: \"1ddd6078-62c2-454b-8a1c-68ea18b0a7ef\") "
Nov 22 10:44:09 crc kubenswrapper[4926]: I1122 10:44:09.591016 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-l88q7\" (UniqueName: \"kubernetes.io/projected/1ddd6078-62c2-454b-8a1c-68ea18b0a7ef-kube-api-access-l88q7\") pod \"1ddd6078-62c2-454b-8a1c-68ea18b0a7ef\" (UID: \"1ddd6078-62c2-454b-8a1c-68ea18b0a7ef\") "
Nov 22 10:44:09 crc kubenswrapper[4926]: I1122 10:44:09.591042 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1ddd6078-62c2-454b-8a1c-68ea18b0a7ef-catalog-content\") pod \"1ddd6078-62c2-454b-8a1c-68ea18b0a7ef\" (UID: \"1ddd6078-62c2-454b-8a1c-68ea18b0a7ef\") "
Nov 22 10:44:09 crc kubenswrapper[4926]: I1122 10:44:09.598919 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1ddd6078-62c2-454b-8a1c-68ea18b0a7ef-utilities" (OuterVolumeSpecName: "utilities") pod "1ddd6078-62c2-454b-8a1c-68ea18b0a7ef" (UID: "1ddd6078-62c2-454b-8a1c-68ea18b0a7ef"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 22 10:44:09 crc kubenswrapper[4926]: I1122 10:44:09.603362 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1ddd6078-62c2-454b-8a1c-68ea18b0a7ef-kube-api-access-l88q7" (OuterVolumeSpecName: "kube-api-access-l88q7") pod "1ddd6078-62c2-454b-8a1c-68ea18b0a7ef" (UID: "1ddd6078-62c2-454b-8a1c-68ea18b0a7ef"). InnerVolumeSpecName "kube-api-access-l88q7". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 22 10:44:09 crc kubenswrapper[4926]: I1122 10:44:09.685947 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1ddd6078-62c2-454b-8a1c-68ea18b0a7ef-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "1ddd6078-62c2-454b-8a1c-68ea18b0a7ef" (UID: "1ddd6078-62c2-454b-8a1c-68ea18b0a7ef"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 22 10:44:09 crc kubenswrapper[4926]: I1122 10:44:09.693102 4926 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1ddd6078-62c2-454b-8a1c-68ea18b0a7ef-utilities\") on node \"crc\" DevicePath \"\""
Nov 22 10:44:09 crc kubenswrapper[4926]: I1122 10:44:09.693241 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-l88q7\" (UniqueName: \"kubernetes.io/projected/1ddd6078-62c2-454b-8a1c-68ea18b0a7ef-kube-api-access-l88q7\") on node \"crc\" DevicePath \"\""
Nov 22 10:44:09 crc kubenswrapper[4926]: I1122 10:44:09.693353 4926 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1ddd6078-62c2-454b-8a1c-68ea18b0a7ef-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 22 10:44:09 crc kubenswrapper[4926]: I1122 10:44:09.775920 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-q4zdr"]
Nov 22 10:44:09 crc kubenswrapper[4926]: I1122 10:44:09.779546 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-q4zdr"]
Nov 22 10:44:10 crc kubenswrapper[4926]: I1122 10:44:10.588753 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1ddd6078-62c2-454b-8a1c-68ea18b0a7ef" path="/var/lib/kubelet/pods/1ddd6078-62c2-454b-8a1c-68ea18b0a7ef/volumes"
Nov 22 10:44:20 crc kubenswrapper[4926]: I1122 10:44:20.578846 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-authentication/oauth-openshift-558db77b4-g6m6g" podUID="62b979b0-adbc-4c29-a08d-55bb2d07fc6c" containerName="oauth-openshift" containerID="cri-o://629f4420a1b6991950139f29c55fbc6c0627afc956be31202fd3f0cffeacd566" gracePeriod=15
Nov 22 10:44:20 crc kubenswrapper[4926]: I1122 10:44:20.986005 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-g6m6g"
Nov 22 10:44:21 crc kubenswrapper[4926]: I1122 10:44:21.062824 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/62b979b0-adbc-4c29-a08d-55bb2d07fc6c-audit-policies\") pod \"62b979b0-adbc-4c29-a08d-55bb2d07fc6c\" (UID: \"62b979b0-adbc-4c29-a08d-55bb2d07fc6c\") "
Nov 22 10:44:21 crc kubenswrapper[4926]: I1122 10:44:21.063218 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/62b979b0-adbc-4c29-a08d-55bb2d07fc6c-v4-0-config-system-cliconfig\") pod \"62b979b0-adbc-4c29-a08d-55bb2d07fc6c\" (UID: \"62b979b0-adbc-4c29-a08d-55bb2d07fc6c\") "
Nov 22 10:44:21 crc kubenswrapper[4926]: I1122 10:44:21.063610 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-66l87\" (UniqueName: \"kubernetes.io/projected/62b979b0-adbc-4c29-a08d-55bb2d07fc6c-kube-api-access-66l87\") pod \"62b979b0-adbc-4c29-a08d-55bb2d07fc6c\" (UID: \"62b979b0-adbc-4c29-a08d-55bb2d07fc6c\") "
Nov 22 10:44:21 crc kubenswrapper[4926]: I1122 10:44:21.063784 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/62b979b0-adbc-4c29-a08d-55bb2d07fc6c-audit-policies" (OuterVolumeSpecName: "audit-policies") pod "62b979b0-adbc-4c29-a08d-55bb2d07fc6c" (UID: "62b979b0-adbc-4c29-a08d-55bb2d07fc6c"). InnerVolumeSpecName "audit-policies". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 22 10:44:21 crc kubenswrapper[4926]: I1122 10:44:21.063925 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/62b979b0-adbc-4c29-a08d-55bb2d07fc6c-v4-0-config-user-template-provider-selection\") pod \"62b979b0-adbc-4c29-a08d-55bb2d07fc6c\" (UID: \"62b979b0-adbc-4c29-a08d-55bb2d07fc6c\") "
Nov 22 10:44:21 crc kubenswrapper[4926]: I1122 10:44:21.063995 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/62b979b0-adbc-4c29-a08d-55bb2d07fc6c-v4-0-config-system-cliconfig" (OuterVolumeSpecName: "v4-0-config-system-cliconfig") pod "62b979b0-adbc-4c29-a08d-55bb2d07fc6c" (UID: "62b979b0-adbc-4c29-a08d-55bb2d07fc6c"). InnerVolumeSpecName "v4-0-config-system-cliconfig". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 22 10:44:21 crc kubenswrapper[4926]: I1122 10:44:21.064153 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/62b979b0-adbc-4c29-a08d-55bb2d07fc6c-v4-0-config-system-session\") pod \"62b979b0-adbc-4c29-a08d-55bb2d07fc6c\" (UID: \"62b979b0-adbc-4c29-a08d-55bb2d07fc6c\") "
Nov 22 10:44:21 crc kubenswrapper[4926]: I1122 10:44:21.064178 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/62b979b0-adbc-4c29-a08d-55bb2d07fc6c-v4-0-config-user-idp-0-file-data\") pod \"62b979b0-adbc-4c29-a08d-55bb2d07fc6c\" (UID: \"62b979b0-adbc-4c29-a08d-55bb2d07fc6c\") "
Nov 22 10:44:21 crc kubenswrapper[4926]: I1122 10:44:21.064194 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/62b979b0-adbc-4c29-a08d-55bb2d07fc6c-v4-0-config-system-ocp-branding-template\") pod \"62b979b0-adbc-4c29-a08d-55bb2d07fc6c\" (UID: \"62b979b0-adbc-4c29-a08d-55bb2d07fc6c\") "
Nov 22 10:44:21 crc kubenswrapper[4926]: I1122 10:44:21.064403 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/62b979b0-adbc-4c29-a08d-55bb2d07fc6c-audit-dir\") pod \"62b979b0-adbc-4c29-a08d-55bb2d07fc6c\" (UID: \"62b979b0-adbc-4c29-a08d-55bb2d07fc6c\") "
Nov 22 10:44:21 crc kubenswrapper[4926]: I1122 10:44:21.064440 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/62b979b0-adbc-4c29-a08d-55bb2d07fc6c-v4-0-config-user-template-error\") pod \"62b979b0-adbc-4c29-a08d-55bb2d07fc6c\" (UID: \"62b979b0-adbc-4c29-a08d-55bb2d07fc6c\") "
Nov 22 10:44:21 crc kubenswrapper[4926]: I1122 10:44:21.064465 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/62b979b0-adbc-4c29-a08d-55bb2d07fc6c-v4-0-config-system-serving-cert\") pod \"62b979b0-adbc-4c29-a08d-55bb2d07fc6c\" (UID: \"62b979b0-adbc-4c29-a08d-55bb2d07fc6c\") "
Nov 22 10:44:21 crc kubenswrapper[4926]: I1122 10:44:21.064503 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/62b979b0-adbc-4c29-a08d-55bb2d07fc6c-v4-0-config-system-router-certs\") pod
\"62b979b0-adbc-4c29-a08d-55bb2d07fc6c\" (UID: \"62b979b0-adbc-4c29-a08d-55bb2d07fc6c\") " Nov 22 10:44:21 crc kubenswrapper[4926]: I1122 10:44:21.064533 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/62b979b0-adbc-4c29-a08d-55bb2d07fc6c-v4-0-config-system-service-ca\") pod \"62b979b0-adbc-4c29-a08d-55bb2d07fc6c\" (UID: \"62b979b0-adbc-4c29-a08d-55bb2d07fc6c\") " Nov 22 10:44:21 crc kubenswrapper[4926]: I1122 10:44:21.064558 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/62b979b0-adbc-4c29-a08d-55bb2d07fc6c-v4-0-config-user-template-login\") pod \"62b979b0-adbc-4c29-a08d-55bb2d07fc6c\" (UID: \"62b979b0-adbc-4c29-a08d-55bb2d07fc6c\") " Nov 22 10:44:21 crc kubenswrapper[4926]: I1122 10:44:21.064578 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/62b979b0-adbc-4c29-a08d-55bb2d07fc6c-v4-0-config-system-trusted-ca-bundle\") pod \"62b979b0-adbc-4c29-a08d-55bb2d07fc6c\" (UID: \"62b979b0-adbc-4c29-a08d-55bb2d07fc6c\") " Nov 22 10:44:21 crc kubenswrapper[4926]: I1122 10:44:21.064761 4926 reconciler_common.go:293] "Volume detached for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/62b979b0-adbc-4c29-a08d-55bb2d07fc6c-audit-policies\") on node \"crc\" DevicePath \"\"" Nov 22 10:44:21 crc kubenswrapper[4926]: I1122 10:44:21.064773 4926 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/62b979b0-adbc-4c29-a08d-55bb2d07fc6c-v4-0-config-system-cliconfig\") on node \"crc\" DevicePath \"\"" Nov 22 10:44:21 crc kubenswrapper[4926]: I1122 10:44:21.065536 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/62b979b0-adbc-4c29-a08d-55bb2d07fc6c-v4-0-config-system-trusted-ca-bundle" (OuterVolumeSpecName: "v4-0-config-system-trusted-ca-bundle") pod "62b979b0-adbc-4c29-a08d-55bb2d07fc6c" (UID: "62b979b0-adbc-4c29-a08d-55bb2d07fc6c"). InnerVolumeSpecName "v4-0-config-system-trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:44:21 crc kubenswrapper[4926]: I1122 10:44:21.066419 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/62b979b0-adbc-4c29-a08d-55bb2d07fc6c-v4-0-config-system-service-ca" (OuterVolumeSpecName: "v4-0-config-system-service-ca") pod "62b979b0-adbc-4c29-a08d-55bb2d07fc6c" (UID: "62b979b0-adbc-4c29-a08d-55bb2d07fc6c"). InnerVolumeSpecName "v4-0-config-system-service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:44:21 crc kubenswrapper[4926]: I1122 10:44:21.068278 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/62b979b0-adbc-4c29-a08d-55bb2d07fc6c-audit-dir" (OuterVolumeSpecName: "audit-dir") pod "62b979b0-adbc-4c29-a08d-55bb2d07fc6c" (UID: "62b979b0-adbc-4c29-a08d-55bb2d07fc6c"). InnerVolumeSpecName "audit-dir". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 22 10:44:21 crc kubenswrapper[4926]: I1122 10:44:21.068794 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/62b979b0-adbc-4c29-a08d-55bb2d07fc6c-kube-api-access-66l87" (OuterVolumeSpecName: "kube-api-access-66l87") pod "62b979b0-adbc-4c29-a08d-55bb2d07fc6c" (UID: "62b979b0-adbc-4c29-a08d-55bb2d07fc6c"). InnerVolumeSpecName "kube-api-access-66l87". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:44:21 crc kubenswrapper[4926]: I1122 10:44:21.068856 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/62b979b0-adbc-4c29-a08d-55bb2d07fc6c-v4-0-config-system-session" (OuterVolumeSpecName: "v4-0-config-system-session") pod "62b979b0-adbc-4c29-a08d-55bb2d07fc6c" (UID: "62b979b0-adbc-4c29-a08d-55bb2d07fc6c"). InnerVolumeSpecName "v4-0-config-system-session". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:44:21 crc kubenswrapper[4926]: I1122 10:44:21.070382 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/62b979b0-adbc-4c29-a08d-55bb2d07fc6c-v4-0-config-user-template-provider-selection" (OuterVolumeSpecName: "v4-0-config-user-template-provider-selection") pod "62b979b0-adbc-4c29-a08d-55bb2d07fc6c" (UID: "62b979b0-adbc-4c29-a08d-55bb2d07fc6c"). InnerVolumeSpecName "v4-0-config-user-template-provider-selection". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:44:21 crc kubenswrapper[4926]: I1122 10:44:21.071520 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/62b979b0-adbc-4c29-a08d-55bb2d07fc6c-v4-0-config-user-idp-0-file-data" (OuterVolumeSpecName: "v4-0-config-user-idp-0-file-data") pod "62b979b0-adbc-4c29-a08d-55bb2d07fc6c" (UID: "62b979b0-adbc-4c29-a08d-55bb2d07fc6c"). InnerVolumeSpecName "v4-0-config-user-idp-0-file-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:44:21 crc kubenswrapper[4926]: I1122 10:44:21.078164 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/62b979b0-adbc-4c29-a08d-55bb2d07fc6c-v4-0-config-system-serving-cert" (OuterVolumeSpecName: "v4-0-config-system-serving-cert") pod "62b979b0-adbc-4c29-a08d-55bb2d07fc6c" (UID: "62b979b0-adbc-4c29-a08d-55bb2d07fc6c"). InnerVolumeSpecName "v4-0-config-system-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:44:21 crc kubenswrapper[4926]: I1122 10:44:21.089437 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/62b979b0-adbc-4c29-a08d-55bb2d07fc6c-v4-0-config-user-template-login" (OuterVolumeSpecName: "v4-0-config-user-template-login") pod "62b979b0-adbc-4c29-a08d-55bb2d07fc6c" (UID: "62b979b0-adbc-4c29-a08d-55bb2d07fc6c"). InnerVolumeSpecName "v4-0-config-user-template-login". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:44:21 crc kubenswrapper[4926]: I1122 10:44:21.094074 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/62b979b0-adbc-4c29-a08d-55bb2d07fc6c-v4-0-config-user-template-error" (OuterVolumeSpecName: "v4-0-config-user-template-error") pod "62b979b0-adbc-4c29-a08d-55bb2d07fc6c" (UID: "62b979b0-adbc-4c29-a08d-55bb2d07fc6c"). InnerVolumeSpecName "v4-0-config-user-template-error". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:44:21 crc kubenswrapper[4926]: I1122 10:44:21.094374 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/62b979b0-adbc-4c29-a08d-55bb2d07fc6c-v4-0-config-system-router-certs" (OuterVolumeSpecName: "v4-0-config-system-router-certs") pod "62b979b0-adbc-4c29-a08d-55bb2d07fc6c" (UID: "62b979b0-adbc-4c29-a08d-55bb2d07fc6c"). InnerVolumeSpecName "v4-0-config-system-router-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:44:21 crc kubenswrapper[4926]: I1122 10:44:21.095093 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/62b979b0-adbc-4c29-a08d-55bb2d07fc6c-v4-0-config-system-ocp-branding-template" (OuterVolumeSpecName: "v4-0-config-system-ocp-branding-template") pod "62b979b0-adbc-4c29-a08d-55bb2d07fc6c" (UID: "62b979b0-adbc-4c29-a08d-55bb2d07fc6c"). InnerVolumeSpecName "v4-0-config-system-ocp-branding-template". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:44:21 crc kubenswrapper[4926]: I1122 10:44:21.165860 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-66l87\" (UniqueName: \"kubernetes.io/projected/62b979b0-adbc-4c29-a08d-55bb2d07fc6c-kube-api-access-66l87\") on node \"crc\" DevicePath \"\"" Nov 22 10:44:21 crc kubenswrapper[4926]: I1122 10:44:21.165909 4926 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/62b979b0-adbc-4c29-a08d-55bb2d07fc6c-v4-0-config-user-template-provider-selection\") on node \"crc\" DevicePath \"\"" Nov 22 10:44:21 crc kubenswrapper[4926]: I1122 10:44:21.165945 4926 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/62b979b0-adbc-4c29-a08d-55bb2d07fc6c-v4-0-config-system-session\") on node \"crc\" DevicePath \"\"" Nov 22 10:44:21 crc kubenswrapper[4926]: I1122 10:44:21.165955 4926 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/62b979b0-adbc-4c29-a08d-55bb2d07fc6c-v4-0-config-user-idp-0-file-data\") on node \"crc\" DevicePath \"\"" Nov 22 10:44:21 crc kubenswrapper[4926]: I1122 10:44:21.165966 4926 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/62b979b0-adbc-4c29-a08d-55bb2d07fc6c-v4-0-config-system-ocp-branding-template\") on node \"crc\" DevicePath \"\"" Nov 22 10:44:21 crc kubenswrapper[4926]: I1122 10:44:21.165975 4926 reconciler_common.go:293] "Volume detached for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/62b979b0-adbc-4c29-a08d-55bb2d07fc6c-audit-dir\") on node \"crc\" DevicePath \"\"" Nov 22 10:44:21 crc kubenswrapper[4926]: I1122 10:44:21.165984 4926 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/62b979b0-adbc-4c29-a08d-55bb2d07fc6c-v4-0-config-user-template-error\") on node \"crc\" DevicePath \"\"" Nov 22 10:44:21 crc kubenswrapper[4926]: I1122 10:44:21.165993 4926 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/62b979b0-adbc-4c29-a08d-55bb2d07fc6c-v4-0-config-system-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 22 10:44:21 crc kubenswrapper[4926]: I1122 10:44:21.166001 4926 reconciler_common.go:293] "Volume detached for volume 
\"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/62b979b0-adbc-4c29-a08d-55bb2d07fc6c-v4-0-config-system-router-certs\") on node \"crc\" DevicePath \"\"" Nov 22 10:44:21 crc kubenswrapper[4926]: I1122 10:44:21.166011 4926 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/62b979b0-adbc-4c29-a08d-55bb2d07fc6c-v4-0-config-system-service-ca\") on node \"crc\" DevicePath \"\"" Nov 22 10:44:21 crc kubenswrapper[4926]: I1122 10:44:21.166021 4926 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/62b979b0-adbc-4c29-a08d-55bb2d07fc6c-v4-0-config-user-template-login\") on node \"crc\" DevicePath \"\"" Nov 22 10:44:21 crc kubenswrapper[4926]: I1122 10:44:21.166030 4926 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/62b979b0-adbc-4c29-a08d-55bb2d07fc6c-v4-0-config-system-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 22 10:44:21 crc kubenswrapper[4926]: I1122 10:44:21.513850 4926 generic.go:334] "Generic (PLEG): container finished" podID="62b979b0-adbc-4c29-a08d-55bb2d07fc6c" containerID="629f4420a1b6991950139f29c55fbc6c0627afc956be31202fd3f0cffeacd566" exitCode=0 Nov 22 10:44:21 crc kubenswrapper[4926]: I1122 10:44:21.513912 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-g6m6g" event={"ID":"62b979b0-adbc-4c29-a08d-55bb2d07fc6c","Type":"ContainerDied","Data":"629f4420a1b6991950139f29c55fbc6c0627afc956be31202fd3f0cffeacd566"} Nov 22 10:44:21 crc kubenswrapper[4926]: I1122 10:44:21.513943 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-g6m6g" event={"ID":"62b979b0-adbc-4c29-a08d-55bb2d07fc6c","Type":"ContainerDied","Data":"5a93a4185a03ebe14af53da003ad356cd9d77a7b11199b450245ff54434552da"} Nov 22 10:44:21 crc kubenswrapper[4926]: I1122 10:44:21.513949 4926 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-g6m6g" Nov 22 10:44:21 crc kubenswrapper[4926]: I1122 10:44:21.513960 4926 scope.go:117] "RemoveContainer" containerID="629f4420a1b6991950139f29c55fbc6c0627afc956be31202fd3f0cffeacd566" Nov 22 10:44:21 crc kubenswrapper[4926]: I1122 10:44:21.530322 4926 scope.go:117] "RemoveContainer" containerID="629f4420a1b6991950139f29c55fbc6c0627afc956be31202fd3f0cffeacd566" Nov 22 10:44:21 crc kubenswrapper[4926]: E1122 10:44:21.530868 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"629f4420a1b6991950139f29c55fbc6c0627afc956be31202fd3f0cffeacd566\": container with ID starting with 629f4420a1b6991950139f29c55fbc6c0627afc956be31202fd3f0cffeacd566 not found: ID does not exist" containerID="629f4420a1b6991950139f29c55fbc6c0627afc956be31202fd3f0cffeacd566" Nov 22 10:44:21 crc kubenswrapper[4926]: I1122 10:44:21.530914 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"629f4420a1b6991950139f29c55fbc6c0627afc956be31202fd3f0cffeacd566"} err="failed to get container status \"629f4420a1b6991950139f29c55fbc6c0627afc956be31202fd3f0cffeacd566\": rpc error: code = NotFound desc = could not find container \"629f4420a1b6991950139f29c55fbc6c0627afc956be31202fd3f0cffeacd566\": container with ID starting with 629f4420a1b6991950139f29c55fbc6c0627afc956be31202fd3f0cffeacd566 not found: ID does not exist" Nov 22 10:44:21 crc kubenswrapper[4926]: I1122 10:44:21.538986 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-g6m6g"] Nov 22 10:44:21 crc kubenswrapper[4926]: I1122 10:44:21.546613 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-g6m6g"] Nov 22 10:44:22 crc kubenswrapper[4926]: I1122 10:44:22.591748 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="62b979b0-adbc-4c29-a08d-55bb2d07fc6c" path="/var/lib/kubelet/pods/62b979b0-adbc-4c29-a08d-55bb2d07fc6c/volumes" Nov 22 10:44:25 crc kubenswrapper[4926]: I1122 10:44:25.389571 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-authentication/oauth-openshift-7c49d57bcc-t69xr"] Nov 22 10:44:25 crc kubenswrapper[4926]: E1122 10:44:25.390156 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f0547d70-c2f4-4d26-ad73-9f65df8b1bc1" containerName="extract-content" Nov 22 10:44:25 crc kubenswrapper[4926]: I1122 10:44:25.390178 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="f0547d70-c2f4-4d26-ad73-9f65df8b1bc1" containerName="extract-content" Nov 22 10:44:25 crc kubenswrapper[4926]: E1122 10:44:25.390196 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1ddd6078-62c2-454b-8a1c-68ea18b0a7ef" containerName="registry-server" Nov 22 10:44:25 crc kubenswrapper[4926]: I1122 10:44:25.390208 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="1ddd6078-62c2-454b-8a1c-68ea18b0a7ef" containerName="registry-server" Nov 22 10:44:25 crc kubenswrapper[4926]: E1122 10:44:25.390225 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f0547d70-c2f4-4d26-ad73-9f65df8b1bc1" containerName="registry-server" Nov 22 10:44:25 crc kubenswrapper[4926]: I1122 10:44:25.390236 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="f0547d70-c2f4-4d26-ad73-9f65df8b1bc1" containerName="registry-server" Nov 22 10:44:25 crc kubenswrapper[4926]: E1122 10:44:25.390252 
4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="72e5abe7-12f0-4180-9d5c-9d55fe800bc5" containerName="extract-utilities" Nov 22 10:44:25 crc kubenswrapper[4926]: I1122 10:44:25.390264 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="72e5abe7-12f0-4180-9d5c-9d55fe800bc5" containerName="extract-utilities" Nov 22 10:44:25 crc kubenswrapper[4926]: E1122 10:44:25.390283 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1ddd6078-62c2-454b-8a1c-68ea18b0a7ef" containerName="extract-utilities" Nov 22 10:44:25 crc kubenswrapper[4926]: I1122 10:44:25.390294 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="1ddd6078-62c2-454b-8a1c-68ea18b0a7ef" containerName="extract-utilities" Nov 22 10:44:25 crc kubenswrapper[4926]: E1122 10:44:25.390307 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="404e486e-98a0-4803-bb70-2db849a922ea" containerName="pruner" Nov 22 10:44:25 crc kubenswrapper[4926]: I1122 10:44:25.390318 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="404e486e-98a0-4803-bb70-2db849a922ea" containerName="pruner" Nov 22 10:44:25 crc kubenswrapper[4926]: E1122 10:44:25.390334 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="14bd2176-9eab-42ea-b510-b59e08e5c89a" containerName="pruner" Nov 22 10:44:25 crc kubenswrapper[4926]: I1122 10:44:25.390345 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="14bd2176-9eab-42ea-b510-b59e08e5c89a" containerName="pruner" Nov 22 10:44:25 crc kubenswrapper[4926]: E1122 10:44:25.390364 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1ddd6078-62c2-454b-8a1c-68ea18b0a7ef" containerName="extract-content" Nov 22 10:44:25 crc kubenswrapper[4926]: I1122 10:44:25.390375 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="1ddd6078-62c2-454b-8a1c-68ea18b0a7ef" containerName="extract-content" Nov 22 10:44:25 crc kubenswrapper[4926]: E1122 10:44:25.390388 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4ada1ec1-1f41-40d3-a36d-3de32bb19c9c" containerName="extract-content" Nov 22 10:44:25 crc kubenswrapper[4926]: I1122 10:44:25.390401 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="4ada1ec1-1f41-40d3-a36d-3de32bb19c9c" containerName="extract-content" Nov 22 10:44:25 crc kubenswrapper[4926]: E1122 10:44:25.390420 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="72e5abe7-12f0-4180-9d5c-9d55fe800bc5" containerName="extract-content" Nov 22 10:44:25 crc kubenswrapper[4926]: I1122 10:44:25.390432 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="72e5abe7-12f0-4180-9d5c-9d55fe800bc5" containerName="extract-content" Nov 22 10:44:25 crc kubenswrapper[4926]: E1122 10:44:25.390447 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="62b979b0-adbc-4c29-a08d-55bb2d07fc6c" containerName="oauth-openshift" Nov 22 10:44:25 crc kubenswrapper[4926]: I1122 10:44:25.390459 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="62b979b0-adbc-4c29-a08d-55bb2d07fc6c" containerName="oauth-openshift" Nov 22 10:44:25 crc kubenswrapper[4926]: E1122 10:44:25.390481 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f0547d70-c2f4-4d26-ad73-9f65df8b1bc1" containerName="extract-utilities" Nov 22 10:44:25 crc kubenswrapper[4926]: I1122 10:44:25.390493 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="f0547d70-c2f4-4d26-ad73-9f65df8b1bc1" containerName="extract-utilities" Nov 22 10:44:25 crc kubenswrapper[4926]: E1122 10:44:25.390511 4926 
cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4ada1ec1-1f41-40d3-a36d-3de32bb19c9c" containerName="registry-server" Nov 22 10:44:25 crc kubenswrapper[4926]: I1122 10:44:25.390522 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="4ada1ec1-1f41-40d3-a36d-3de32bb19c9c" containerName="registry-server" Nov 22 10:44:25 crc kubenswrapper[4926]: E1122 10:44:25.390539 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4ada1ec1-1f41-40d3-a36d-3de32bb19c9c" containerName="extract-utilities" Nov 22 10:44:25 crc kubenswrapper[4926]: I1122 10:44:25.390551 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="4ada1ec1-1f41-40d3-a36d-3de32bb19c9c" containerName="extract-utilities" Nov 22 10:44:25 crc kubenswrapper[4926]: E1122 10:44:25.390574 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="72e5abe7-12f0-4180-9d5c-9d55fe800bc5" containerName="registry-server" Nov 22 10:44:25 crc kubenswrapper[4926]: I1122 10:44:25.390586 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="72e5abe7-12f0-4180-9d5c-9d55fe800bc5" containerName="registry-server" Nov 22 10:44:25 crc kubenswrapper[4926]: I1122 10:44:25.390735 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="404e486e-98a0-4803-bb70-2db849a922ea" containerName="pruner" Nov 22 10:44:25 crc kubenswrapper[4926]: I1122 10:44:25.390752 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="f0547d70-c2f4-4d26-ad73-9f65df8b1bc1" containerName="registry-server" Nov 22 10:44:25 crc kubenswrapper[4926]: I1122 10:44:25.390773 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="1ddd6078-62c2-454b-8a1c-68ea18b0a7ef" containerName="registry-server" Nov 22 10:44:25 crc kubenswrapper[4926]: I1122 10:44:25.390787 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="72e5abe7-12f0-4180-9d5c-9d55fe800bc5" containerName="registry-server" Nov 22 10:44:25 crc kubenswrapper[4926]: I1122 10:44:25.390834 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="62b979b0-adbc-4c29-a08d-55bb2d07fc6c" containerName="oauth-openshift" Nov 22 10:44:25 crc kubenswrapper[4926]: I1122 10:44:25.390854 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="14bd2176-9eab-42ea-b510-b59e08e5c89a" containerName="pruner" Nov 22 10:44:25 crc kubenswrapper[4926]: I1122 10:44:25.390870 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="4ada1ec1-1f41-40d3-a36d-3de32bb19c9c" containerName="registry-server" Nov 22 10:44:25 crc kubenswrapper[4926]: I1122 10:44:25.391452 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-authentication/oauth-openshift-7c49d57bcc-t69xr" Nov 22 10:44:25 crc kubenswrapper[4926]: I1122 10:44:25.394708 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-error" Nov 22 10:44:25 crc kubenswrapper[4926]: I1122 10:44:25.395594 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-session" Nov 22 10:44:25 crc kubenswrapper[4926]: I1122 10:44:25.396782 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-idp-0-file-data" Nov 22 10:44:25 crc kubenswrapper[4926]: I1122 10:44:25.396965 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-cliconfig" Nov 22 10:44:25 crc kubenswrapper[4926]: I1122 10:44:25.397294 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-service-ca" Nov 22 10:44:25 crc kubenswrapper[4926]: I1122 10:44:25.398281 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"oauth-openshift-dockercfg-znhcc" Nov 22 10:44:25 crc kubenswrapper[4926]: I1122 10:44:25.398363 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-router-certs" Nov 22 10:44:25 crc kubenswrapper[4926]: I1122 10:44:25.398876 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-serving-cert" Nov 22 10:44:25 crc kubenswrapper[4926]: I1122 10:44:25.399131 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"kube-root-ca.crt" Nov 22 10:44:25 crc kubenswrapper[4926]: I1122 10:44:25.399676 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"openshift-service-ca.crt" Nov 22 10:44:25 crc kubenswrapper[4926]: I1122 10:44:25.400117 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-provider-selection" Nov 22 10:44:25 crc kubenswrapper[4926]: I1122 10:44:25.400964 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"audit" Nov 22 10:44:25 crc kubenswrapper[4926]: I1122 10:44:25.411966 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-7c49d57bcc-t69xr"] Nov 22 10:44:25 crc kubenswrapper[4926]: I1122 10:44:25.420058 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-trusted-ca-bundle" Nov 22 10:44:25 crc kubenswrapper[4926]: I1122 10:44:25.420279 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-login" Nov 22 10:44:25 crc kubenswrapper[4926]: I1122 10:44:25.425395 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-ocp-branding-template" Nov 22 10:44:25 crc kubenswrapper[4926]: I1122 10:44:25.522291 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/eae6e034-553f-4df9-8a7b-1e213afbbb38-v4-0-config-system-serving-cert\") pod \"oauth-openshift-7c49d57bcc-t69xr\" (UID: \"eae6e034-553f-4df9-8a7b-1e213afbbb38\") " 
pod="openshift-authentication/oauth-openshift-7c49d57bcc-t69xr" Nov 22 10:44:25 crc kubenswrapper[4926]: I1122 10:44:25.522376 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/eae6e034-553f-4df9-8a7b-1e213afbbb38-v4-0-config-user-template-error\") pod \"oauth-openshift-7c49d57bcc-t69xr\" (UID: \"eae6e034-553f-4df9-8a7b-1e213afbbb38\") " pod="openshift-authentication/oauth-openshift-7c49d57bcc-t69xr" Nov 22 10:44:25 crc kubenswrapper[4926]: I1122 10:44:25.522448 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/eae6e034-553f-4df9-8a7b-1e213afbbb38-v4-0-config-system-service-ca\") pod \"oauth-openshift-7c49d57bcc-t69xr\" (UID: \"eae6e034-553f-4df9-8a7b-1e213afbbb38\") " pod="openshift-authentication/oauth-openshift-7c49d57bcc-t69xr" Nov 22 10:44:25 crc kubenswrapper[4926]: I1122 10:44:25.522482 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/eae6e034-553f-4df9-8a7b-1e213afbbb38-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-7c49d57bcc-t69xr\" (UID: \"eae6e034-553f-4df9-8a7b-1e213afbbb38\") " pod="openshift-authentication/oauth-openshift-7c49d57bcc-t69xr" Nov 22 10:44:25 crc kubenswrapper[4926]: I1122 10:44:25.522588 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/eae6e034-553f-4df9-8a7b-1e213afbbb38-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-7c49d57bcc-t69xr\" (UID: \"eae6e034-553f-4df9-8a7b-1e213afbbb38\") " pod="openshift-authentication/oauth-openshift-7c49d57bcc-t69xr" Nov 22 10:44:25 crc kubenswrapper[4926]: I1122 10:44:25.522634 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/eae6e034-553f-4df9-8a7b-1e213afbbb38-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-7c49d57bcc-t69xr\" (UID: \"eae6e034-553f-4df9-8a7b-1e213afbbb38\") " pod="openshift-authentication/oauth-openshift-7c49d57bcc-t69xr" Nov 22 10:44:25 crc kubenswrapper[4926]: I1122 10:44:25.522754 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/eae6e034-553f-4df9-8a7b-1e213afbbb38-v4-0-config-system-router-certs\") pod \"oauth-openshift-7c49d57bcc-t69xr\" (UID: \"eae6e034-553f-4df9-8a7b-1e213afbbb38\") " pod="openshift-authentication/oauth-openshift-7c49d57bcc-t69xr" Nov 22 10:44:25 crc kubenswrapper[4926]: I1122 10:44:25.522799 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/eae6e034-553f-4df9-8a7b-1e213afbbb38-audit-policies\") pod \"oauth-openshift-7c49d57bcc-t69xr\" (UID: \"eae6e034-553f-4df9-8a7b-1e213afbbb38\") " pod="openshift-authentication/oauth-openshift-7c49d57bcc-t69xr" Nov 22 10:44:25 crc kubenswrapper[4926]: I1122 10:44:25.522860 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: 
\"kubernetes.io/host-path/eae6e034-553f-4df9-8a7b-1e213afbbb38-audit-dir\") pod \"oauth-openshift-7c49d57bcc-t69xr\" (UID: \"eae6e034-553f-4df9-8a7b-1e213afbbb38\") " pod="openshift-authentication/oauth-openshift-7c49d57bcc-t69xr" Nov 22 10:44:25 crc kubenswrapper[4926]: I1122 10:44:25.522952 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/eae6e034-553f-4df9-8a7b-1e213afbbb38-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-7c49d57bcc-t69xr\" (UID: \"eae6e034-553f-4df9-8a7b-1e213afbbb38\") " pod="openshift-authentication/oauth-openshift-7c49d57bcc-t69xr" Nov 22 10:44:25 crc kubenswrapper[4926]: I1122 10:44:25.522982 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/eae6e034-553f-4df9-8a7b-1e213afbbb38-v4-0-config-system-session\") pod \"oauth-openshift-7c49d57bcc-t69xr\" (UID: \"eae6e034-553f-4df9-8a7b-1e213afbbb38\") " pod="openshift-authentication/oauth-openshift-7c49d57bcc-t69xr" Nov 22 10:44:25 crc kubenswrapper[4926]: I1122 10:44:25.523028 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/eae6e034-553f-4df9-8a7b-1e213afbbb38-v4-0-config-system-cliconfig\") pod \"oauth-openshift-7c49d57bcc-t69xr\" (UID: \"eae6e034-553f-4df9-8a7b-1e213afbbb38\") " pod="openshift-authentication/oauth-openshift-7c49d57bcc-t69xr" Nov 22 10:44:25 crc kubenswrapper[4926]: I1122 10:44:25.523050 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/eae6e034-553f-4df9-8a7b-1e213afbbb38-v4-0-config-user-template-login\") pod \"oauth-openshift-7c49d57bcc-t69xr\" (UID: \"eae6e034-553f-4df9-8a7b-1e213afbbb38\") " pod="openshift-authentication/oauth-openshift-7c49d57bcc-t69xr" Nov 22 10:44:25 crc kubenswrapper[4926]: I1122 10:44:25.523126 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8b8lt\" (UniqueName: \"kubernetes.io/projected/eae6e034-553f-4df9-8a7b-1e213afbbb38-kube-api-access-8b8lt\") pod \"oauth-openshift-7c49d57bcc-t69xr\" (UID: \"eae6e034-553f-4df9-8a7b-1e213afbbb38\") " pod="openshift-authentication/oauth-openshift-7c49d57bcc-t69xr" Nov 22 10:44:25 crc kubenswrapper[4926]: I1122 10:44:25.624245 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/eae6e034-553f-4df9-8a7b-1e213afbbb38-audit-policies\") pod \"oauth-openshift-7c49d57bcc-t69xr\" (UID: \"eae6e034-553f-4df9-8a7b-1e213afbbb38\") " pod="openshift-authentication/oauth-openshift-7c49d57bcc-t69xr" Nov 22 10:44:25 crc kubenswrapper[4926]: I1122 10:44:25.624325 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/eae6e034-553f-4df9-8a7b-1e213afbbb38-audit-dir\") pod \"oauth-openshift-7c49d57bcc-t69xr\" (UID: \"eae6e034-553f-4df9-8a7b-1e213afbbb38\") " pod="openshift-authentication/oauth-openshift-7c49d57bcc-t69xr" Nov 22 10:44:25 crc kubenswrapper[4926]: I1122 10:44:25.624370 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-provider-selection\" 
(UniqueName: \"kubernetes.io/secret/eae6e034-553f-4df9-8a7b-1e213afbbb38-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-7c49d57bcc-t69xr\" (UID: \"eae6e034-553f-4df9-8a7b-1e213afbbb38\") " pod="openshift-authentication/oauth-openshift-7c49d57bcc-t69xr" Nov 22 10:44:25 crc kubenswrapper[4926]: I1122 10:44:25.624407 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/eae6e034-553f-4df9-8a7b-1e213afbbb38-v4-0-config-system-session\") pod \"oauth-openshift-7c49d57bcc-t69xr\" (UID: \"eae6e034-553f-4df9-8a7b-1e213afbbb38\") " pod="openshift-authentication/oauth-openshift-7c49d57bcc-t69xr" Nov 22 10:44:25 crc kubenswrapper[4926]: I1122 10:44:25.624456 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/eae6e034-553f-4df9-8a7b-1e213afbbb38-v4-0-config-system-cliconfig\") pod \"oauth-openshift-7c49d57bcc-t69xr\" (UID: \"eae6e034-553f-4df9-8a7b-1e213afbbb38\") " pod="openshift-authentication/oauth-openshift-7c49d57bcc-t69xr" Nov 22 10:44:25 crc kubenswrapper[4926]: I1122 10:44:25.624489 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/eae6e034-553f-4df9-8a7b-1e213afbbb38-v4-0-config-user-template-login\") pod \"oauth-openshift-7c49d57bcc-t69xr\" (UID: \"eae6e034-553f-4df9-8a7b-1e213afbbb38\") " pod="openshift-authentication/oauth-openshift-7c49d57bcc-t69xr" Nov 22 10:44:25 crc kubenswrapper[4926]: I1122 10:44:25.624519 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/eae6e034-553f-4df9-8a7b-1e213afbbb38-audit-dir\") pod \"oauth-openshift-7c49d57bcc-t69xr\" (UID: \"eae6e034-553f-4df9-8a7b-1e213afbbb38\") " pod="openshift-authentication/oauth-openshift-7c49d57bcc-t69xr" Nov 22 10:44:25 crc kubenswrapper[4926]: I1122 10:44:25.624535 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8b8lt\" (UniqueName: \"kubernetes.io/projected/eae6e034-553f-4df9-8a7b-1e213afbbb38-kube-api-access-8b8lt\") pod \"oauth-openshift-7c49d57bcc-t69xr\" (UID: \"eae6e034-553f-4df9-8a7b-1e213afbbb38\") " pod="openshift-authentication/oauth-openshift-7c49d57bcc-t69xr" Nov 22 10:44:25 crc kubenswrapper[4926]: I1122 10:44:25.624746 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/eae6e034-553f-4df9-8a7b-1e213afbbb38-v4-0-config-system-serving-cert\") pod \"oauth-openshift-7c49d57bcc-t69xr\" (UID: \"eae6e034-553f-4df9-8a7b-1e213afbbb38\") " pod="openshift-authentication/oauth-openshift-7c49d57bcc-t69xr" Nov 22 10:44:25 crc kubenswrapper[4926]: I1122 10:44:25.624834 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/eae6e034-553f-4df9-8a7b-1e213afbbb38-v4-0-config-user-template-error\") pod \"oauth-openshift-7c49d57bcc-t69xr\" (UID: \"eae6e034-553f-4df9-8a7b-1e213afbbb38\") " pod="openshift-authentication/oauth-openshift-7c49d57bcc-t69xr" Nov 22 10:44:25 crc kubenswrapper[4926]: I1122 10:44:25.624929 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: 
\"kubernetes.io/configmap/eae6e034-553f-4df9-8a7b-1e213afbbb38-v4-0-config-system-service-ca\") pod \"oauth-openshift-7c49d57bcc-t69xr\" (UID: \"eae6e034-553f-4df9-8a7b-1e213afbbb38\") " pod="openshift-authentication/oauth-openshift-7c49d57bcc-t69xr" Nov 22 10:44:25 crc kubenswrapper[4926]: I1122 10:44:25.624994 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/eae6e034-553f-4df9-8a7b-1e213afbbb38-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-7c49d57bcc-t69xr\" (UID: \"eae6e034-553f-4df9-8a7b-1e213afbbb38\") " pod="openshift-authentication/oauth-openshift-7c49d57bcc-t69xr" Nov 22 10:44:25 crc kubenswrapper[4926]: I1122 10:44:25.625055 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/eae6e034-553f-4df9-8a7b-1e213afbbb38-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-7c49d57bcc-t69xr\" (UID: \"eae6e034-553f-4df9-8a7b-1e213afbbb38\") " pod="openshift-authentication/oauth-openshift-7c49d57bcc-t69xr" Nov 22 10:44:25 crc kubenswrapper[4926]: I1122 10:44:25.625106 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/eae6e034-553f-4df9-8a7b-1e213afbbb38-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-7c49d57bcc-t69xr\" (UID: \"eae6e034-553f-4df9-8a7b-1e213afbbb38\") " pod="openshift-authentication/oauth-openshift-7c49d57bcc-t69xr" Nov 22 10:44:25 crc kubenswrapper[4926]: I1122 10:44:25.625202 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/eae6e034-553f-4df9-8a7b-1e213afbbb38-v4-0-config-system-router-certs\") pod \"oauth-openshift-7c49d57bcc-t69xr\" (UID: \"eae6e034-553f-4df9-8a7b-1e213afbbb38\") " pod="openshift-authentication/oauth-openshift-7c49d57bcc-t69xr" Nov 22 10:44:25 crc kubenswrapper[4926]: I1122 10:44:25.626511 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/eae6e034-553f-4df9-8a7b-1e213afbbb38-v4-0-config-system-service-ca\") pod \"oauth-openshift-7c49d57bcc-t69xr\" (UID: \"eae6e034-553f-4df9-8a7b-1e213afbbb38\") " pod="openshift-authentication/oauth-openshift-7c49d57bcc-t69xr" Nov 22 10:44:25 crc kubenswrapper[4926]: I1122 10:44:25.626680 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/eae6e034-553f-4df9-8a7b-1e213afbbb38-audit-policies\") pod \"oauth-openshift-7c49d57bcc-t69xr\" (UID: \"eae6e034-553f-4df9-8a7b-1e213afbbb38\") " pod="openshift-authentication/oauth-openshift-7c49d57bcc-t69xr" Nov 22 10:44:25 crc kubenswrapper[4926]: I1122 10:44:25.626705 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/eae6e034-553f-4df9-8a7b-1e213afbbb38-v4-0-config-system-cliconfig\") pod \"oauth-openshift-7c49d57bcc-t69xr\" (UID: \"eae6e034-553f-4df9-8a7b-1e213afbbb38\") " pod="openshift-authentication/oauth-openshift-7c49d57bcc-t69xr" Nov 22 10:44:25 crc kubenswrapper[4926]: I1122 10:44:25.626718 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: 
\"kubernetes.io/configmap/eae6e034-553f-4df9-8a7b-1e213afbbb38-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-7c49d57bcc-t69xr\" (UID: \"eae6e034-553f-4df9-8a7b-1e213afbbb38\") " pod="openshift-authentication/oauth-openshift-7c49d57bcc-t69xr" Nov 22 10:44:25 crc kubenswrapper[4926]: I1122 10:44:25.634544 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/eae6e034-553f-4df9-8a7b-1e213afbbb38-v4-0-config-user-template-error\") pod \"oauth-openshift-7c49d57bcc-t69xr\" (UID: \"eae6e034-553f-4df9-8a7b-1e213afbbb38\") " pod="openshift-authentication/oauth-openshift-7c49d57bcc-t69xr" Nov 22 10:44:25 crc kubenswrapper[4926]: I1122 10:44:25.634654 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/eae6e034-553f-4df9-8a7b-1e213afbbb38-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-7c49d57bcc-t69xr\" (UID: \"eae6e034-553f-4df9-8a7b-1e213afbbb38\") " pod="openshift-authentication/oauth-openshift-7c49d57bcc-t69xr" Nov 22 10:44:25 crc kubenswrapper[4926]: I1122 10:44:25.635647 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/eae6e034-553f-4df9-8a7b-1e213afbbb38-v4-0-config-system-session\") pod \"oauth-openshift-7c49d57bcc-t69xr\" (UID: \"eae6e034-553f-4df9-8a7b-1e213afbbb38\") " pod="openshift-authentication/oauth-openshift-7c49d57bcc-t69xr" Nov 22 10:44:25 crc kubenswrapper[4926]: I1122 10:44:25.636672 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/eae6e034-553f-4df9-8a7b-1e213afbbb38-v4-0-config-system-serving-cert\") pod \"oauth-openshift-7c49d57bcc-t69xr\" (UID: \"eae6e034-553f-4df9-8a7b-1e213afbbb38\") " pod="openshift-authentication/oauth-openshift-7c49d57bcc-t69xr" Nov 22 10:44:25 crc kubenswrapper[4926]: I1122 10:44:25.636694 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/eae6e034-553f-4df9-8a7b-1e213afbbb38-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-7c49d57bcc-t69xr\" (UID: \"eae6e034-553f-4df9-8a7b-1e213afbbb38\") " pod="openshift-authentication/oauth-openshift-7c49d57bcc-t69xr" Nov 22 10:44:25 crc kubenswrapper[4926]: I1122 10:44:25.636848 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/eae6e034-553f-4df9-8a7b-1e213afbbb38-v4-0-config-system-router-certs\") pod \"oauth-openshift-7c49d57bcc-t69xr\" (UID: \"eae6e034-553f-4df9-8a7b-1e213afbbb38\") " pod="openshift-authentication/oauth-openshift-7c49d57bcc-t69xr" Nov 22 10:44:25 crc kubenswrapper[4926]: I1122 10:44:25.636864 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/eae6e034-553f-4df9-8a7b-1e213afbbb38-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-7c49d57bcc-t69xr\" (UID: \"eae6e034-553f-4df9-8a7b-1e213afbbb38\") " pod="openshift-authentication/oauth-openshift-7c49d57bcc-t69xr" Nov 22 10:44:25 crc kubenswrapper[4926]: I1122 10:44:25.638503 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-login\" (UniqueName: 
\"kubernetes.io/secret/eae6e034-553f-4df9-8a7b-1e213afbbb38-v4-0-config-user-template-login\") pod \"oauth-openshift-7c49d57bcc-t69xr\" (UID: \"eae6e034-553f-4df9-8a7b-1e213afbbb38\") " pod="openshift-authentication/oauth-openshift-7c49d57bcc-t69xr" Nov 22 10:44:25 crc kubenswrapper[4926]: I1122 10:44:25.657975 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8b8lt\" (UniqueName: \"kubernetes.io/projected/eae6e034-553f-4df9-8a7b-1e213afbbb38-kube-api-access-8b8lt\") pod \"oauth-openshift-7c49d57bcc-t69xr\" (UID: \"eae6e034-553f-4df9-8a7b-1e213afbbb38\") " pod="openshift-authentication/oauth-openshift-7c49d57bcc-t69xr" Nov 22 10:44:25 crc kubenswrapper[4926]: I1122 10:44:25.725073 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-7c49d57bcc-t69xr" Nov 22 10:44:25 crc kubenswrapper[4926]: I1122 10:44:25.986249 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-7c49d57bcc-t69xr"] Nov 22 10:44:26 crc kubenswrapper[4926]: W1122 10:44:26.001233 4926 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podeae6e034_553f_4df9_8a7b_1e213afbbb38.slice/crio-23abcdcf246673c20c738bd9968b8eb1224c4b7c47a262ff56598de24b6c4801 WatchSource:0}: Error finding container 23abcdcf246673c20c738bd9968b8eb1224c4b7c47a262ff56598de24b6c4801: Status 404 returned error can't find the container with id 23abcdcf246673c20c738bd9968b8eb1224c4b7c47a262ff56598de24b6c4801 Nov 22 10:44:26 crc kubenswrapper[4926]: I1122 10:44:26.565427 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-7c49d57bcc-t69xr" event={"ID":"eae6e034-553f-4df9-8a7b-1e213afbbb38","Type":"ContainerStarted","Data":"1875e4d7a7ac3380be506d2155b0cad5aeb860630a7695ee98441c5009574062"} Nov 22 10:44:26 crc kubenswrapper[4926]: I1122 10:44:26.565483 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-7c49d57bcc-t69xr" event={"ID":"eae6e034-553f-4df9-8a7b-1e213afbbb38","Type":"ContainerStarted","Data":"23abcdcf246673c20c738bd9968b8eb1224c4b7c47a262ff56598de24b6c4801"} Nov 22 10:44:26 crc kubenswrapper[4926]: I1122 10:44:26.565879 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-authentication/oauth-openshift-7c49d57bcc-t69xr" Nov 22 10:44:26 crc kubenswrapper[4926]: I1122 10:44:26.598940 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-authentication/oauth-openshift-7c49d57bcc-t69xr" podStartSLOduration=31.598859116 podStartE2EDuration="31.598859116s" podCreationTimestamp="2025-11-22 10:43:55 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 10:44:26.590816316 +0000 UTC m=+286.892421603" watchObservedRunningTime="2025-11-22 10:44:26.598859116 +0000 UTC m=+286.900464443" Nov 22 10:44:26 crc kubenswrapper[4926]: I1122 10:44:26.832862 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-authentication/oauth-openshift-7c49d57bcc-t69xr" Nov 22 10:44:44 crc kubenswrapper[4926]: I1122 10:44:44.507239 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-c2r2n"] Nov 22 10:44:44 crc kubenswrapper[4926]: I1122 10:44:44.507905 4926 kuberuntime_container.go:808] "Killing container with a grace 
period" pod="openshift-marketplace/certified-operators-c2r2n" podUID="b8674be0-e053-4d83-9a04-008800542315" containerName="registry-server" containerID="cri-o://b59885bcd182306568e87c4aa4abe4a394e491a08fa41e867b4eebd10aecc93c" gracePeriod=30 Nov 22 10:44:44 crc kubenswrapper[4926]: I1122 10:44:44.545046 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-4864b"] Nov 22 10:44:44 crc kubenswrapper[4926]: I1122 10:44:44.545364 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-4864b" podUID="c34f3883-c9b2-41fe-9a17-127918e9ef88" containerName="registry-server" containerID="cri-o://393f64ed1749d16d6219d2bf39288c10d920e6814c6f4c0495b98a6c96e43a9e" gracePeriod=30 Nov 22 10:44:44 crc kubenswrapper[4926]: I1122 10:44:44.555084 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-cm8rb"] Nov 22 10:44:44 crc kubenswrapper[4926]: I1122 10:44:44.555412 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/marketplace-operator-79b997595-cm8rb" podUID="4943246c-40df-4927-8380-b7d2804a17f7" containerName="marketplace-operator" containerID="cri-o://fc207104e5a09fb5729483274c5d7f4defae319a8d902df57714a5315bb14397" gracePeriod=30 Nov 22 10:44:44 crc kubenswrapper[4926]: I1122 10:44:44.565573 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-x6q97"] Nov 22 10:44:44 crc kubenswrapper[4926]: I1122 10:44:44.567274 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-x6q97" podUID="8ea24004-1068-4fad-a694-c92251db240d" containerName="registry-server" containerID="cri-o://96332c28d2eda4da5c6d901aaf32edcdae793cab0d6da57ee12265e08f63055a" gracePeriod=30 Nov 22 10:44:44 crc kubenswrapper[4926]: I1122 10:44:44.575736 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-wxk9m"] Nov 22 10:44:44 crc kubenswrapper[4926]: I1122 10:44:44.576284 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-wxk9m" podUID="4d60f8f8-4bda-476b-b339-e29ec2713912" containerName="registry-server" containerID="cri-o://f92d85d46dfd567fb9143b6efcdf84645b17902e0749bb7824ecc9a354abb1b6" gracePeriod=30 Nov 22 10:44:44 crc kubenswrapper[4926]: I1122 10:44:44.649797 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-b7gxf"] Nov 22 10:44:44 crc kubenswrapper[4926]: I1122 10:44:44.651087 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-b7gxf" Nov 22 10:44:44 crc kubenswrapper[4926]: I1122 10:44:44.660655 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-b7gxf"] Nov 22 10:44:44 crc kubenswrapper[4926]: I1122 10:44:44.673916 4926 generic.go:334] "Generic (PLEG): container finished" podID="b8674be0-e053-4d83-9a04-008800542315" containerID="b59885bcd182306568e87c4aa4abe4a394e491a08fa41e867b4eebd10aecc93c" exitCode=0 Nov 22 10:44:44 crc kubenswrapper[4926]: I1122 10:44:44.673960 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-c2r2n" event={"ID":"b8674be0-e053-4d83-9a04-008800542315","Type":"ContainerDied","Data":"b59885bcd182306568e87c4aa4abe4a394e491a08fa41e867b4eebd10aecc93c"} Nov 22 10:44:44 crc kubenswrapper[4926]: E1122 10:44:44.751007 4926 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 96332c28d2eda4da5c6d901aaf32edcdae793cab0d6da57ee12265e08f63055a is running failed: container process not found" containerID="96332c28d2eda4da5c6d901aaf32edcdae793cab0d6da57ee12265e08f63055a" cmd=["grpc_health_probe","-addr=:50051"] Nov 22 10:44:44 crc kubenswrapper[4926]: E1122 10:44:44.751561 4926 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 96332c28d2eda4da5c6d901aaf32edcdae793cab0d6da57ee12265e08f63055a is running failed: container process not found" containerID="96332c28d2eda4da5c6d901aaf32edcdae793cab0d6da57ee12265e08f63055a" cmd=["grpc_health_probe","-addr=:50051"] Nov 22 10:44:44 crc kubenswrapper[4926]: E1122 10:44:44.752347 4926 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 96332c28d2eda4da5c6d901aaf32edcdae793cab0d6da57ee12265e08f63055a is running failed: container process not found" containerID="96332c28d2eda4da5c6d901aaf32edcdae793cab0d6da57ee12265e08f63055a" cmd=["grpc_health_probe","-addr=:50051"] Nov 22 10:44:44 crc kubenswrapper[4926]: E1122 10:44:44.752382 4926 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 96332c28d2eda4da5c6d901aaf32edcdae793cab0d6da57ee12265e08f63055a is running failed: container process not found" probeType="Readiness" pod="openshift-marketplace/redhat-marketplace-x6q97" podUID="8ea24004-1068-4fad-a694-c92251db240d" containerName="registry-server" Nov 22 10:44:44 crc kubenswrapper[4926]: I1122 10:44:44.809448 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b71cda98-e97f-4b9c-93d9-74c8cabe6420-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-b7gxf\" (UID: \"b71cda98-e97f-4b9c-93d9-74c8cabe6420\") " pod="openshift-marketplace/marketplace-operator-79b997595-b7gxf" Nov 22 10:44:44 crc kubenswrapper[4926]: I1122 10:44:44.809494 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hvhff\" (UniqueName: \"kubernetes.io/projected/b71cda98-e97f-4b9c-93d9-74c8cabe6420-kube-api-access-hvhff\") pod \"marketplace-operator-79b997595-b7gxf\" (UID: \"b71cda98-e97f-4b9c-93d9-74c8cabe6420\") " 
pod="openshift-marketplace/marketplace-operator-79b997595-b7gxf" Nov 22 10:44:44 crc kubenswrapper[4926]: I1122 10:44:44.809542 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/b71cda98-e97f-4b9c-93d9-74c8cabe6420-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-b7gxf\" (UID: \"b71cda98-e97f-4b9c-93d9-74c8cabe6420\") " pod="openshift-marketplace/marketplace-operator-79b997595-b7gxf" Nov 22 10:44:44 crc kubenswrapper[4926]: I1122 10:44:44.910670 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/b71cda98-e97f-4b9c-93d9-74c8cabe6420-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-b7gxf\" (UID: \"b71cda98-e97f-4b9c-93d9-74c8cabe6420\") " pod="openshift-marketplace/marketplace-operator-79b997595-b7gxf" Nov 22 10:44:44 crc kubenswrapper[4926]: I1122 10:44:44.911180 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b71cda98-e97f-4b9c-93d9-74c8cabe6420-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-b7gxf\" (UID: \"b71cda98-e97f-4b9c-93d9-74c8cabe6420\") " pod="openshift-marketplace/marketplace-operator-79b997595-b7gxf" Nov 22 10:44:44 crc kubenswrapper[4926]: I1122 10:44:44.911205 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hvhff\" (UniqueName: \"kubernetes.io/projected/b71cda98-e97f-4b9c-93d9-74c8cabe6420-kube-api-access-hvhff\") pod \"marketplace-operator-79b997595-b7gxf\" (UID: \"b71cda98-e97f-4b9c-93d9-74c8cabe6420\") " pod="openshift-marketplace/marketplace-operator-79b997595-b7gxf" Nov 22 10:44:44 crc kubenswrapper[4926]: I1122 10:44:44.914056 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b71cda98-e97f-4b9c-93d9-74c8cabe6420-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-b7gxf\" (UID: \"b71cda98-e97f-4b9c-93d9-74c8cabe6420\") " pod="openshift-marketplace/marketplace-operator-79b997595-b7gxf" Nov 22 10:44:44 crc kubenswrapper[4926]: I1122 10:44:44.919345 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/b71cda98-e97f-4b9c-93d9-74c8cabe6420-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-b7gxf\" (UID: \"b71cda98-e97f-4b9c-93d9-74c8cabe6420\") " pod="openshift-marketplace/marketplace-operator-79b997595-b7gxf" Nov 22 10:44:44 crc kubenswrapper[4926]: I1122 10:44:44.931076 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hvhff\" (UniqueName: \"kubernetes.io/projected/b71cda98-e97f-4b9c-93d9-74c8cabe6420-kube-api-access-hvhff\") pod \"marketplace-operator-79b997595-b7gxf\" (UID: \"b71cda98-e97f-4b9c-93d9-74c8cabe6420\") " pod="openshift-marketplace/marketplace-operator-79b997595-b7gxf" Nov 22 10:44:44 crc kubenswrapper[4926]: I1122 10:44:44.983215 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-b7gxf" Nov 22 10:44:44 crc kubenswrapper[4926]: I1122 10:44:44.990350 4926 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-c2r2n" Nov 22 10:44:44 crc kubenswrapper[4926]: I1122 10:44:44.994424 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-x6q97" Nov 22 10:44:44 crc kubenswrapper[4926]: I1122 10:44:44.997608 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-4864b" Nov 22 10:44:45 crc kubenswrapper[4926]: I1122 10:44:45.061653 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-wxk9m" Nov 22 10:44:45 crc kubenswrapper[4926]: I1122 10:44:45.114427 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8ea24004-1068-4fad-a694-c92251db240d-utilities\") pod \"8ea24004-1068-4fad-a694-c92251db240d\" (UID: \"8ea24004-1068-4fad-a694-c92251db240d\") " Nov 22 10:44:45 crc kubenswrapper[4926]: I1122 10:44:45.115321 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8ea24004-1068-4fad-a694-c92251db240d-utilities" (OuterVolumeSpecName: "utilities") pod "8ea24004-1068-4fad-a694-c92251db240d" (UID: "8ea24004-1068-4fad-a694-c92251db240d"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 10:44:45 crc kubenswrapper[4926]: I1122 10:44:45.115390 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8ea24004-1068-4fad-a694-c92251db240d-catalog-content\") pod \"8ea24004-1068-4fad-a694-c92251db240d\" (UID: \"8ea24004-1068-4fad-a694-c92251db240d\") " Nov 22 10:44:45 crc kubenswrapper[4926]: I1122 10:44:45.115452 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b8674be0-e053-4d83-9a04-008800542315-catalog-content\") pod \"b8674be0-e053-4d83-9a04-008800542315\" (UID: \"b8674be0-e053-4d83-9a04-008800542315\") " Nov 22 10:44:45 crc kubenswrapper[4926]: I1122 10:44:45.115488 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ms456\" (UniqueName: \"kubernetes.io/projected/b8674be0-e053-4d83-9a04-008800542315-kube-api-access-ms456\") pod \"b8674be0-e053-4d83-9a04-008800542315\" (UID: \"b8674be0-e053-4d83-9a04-008800542315\") " Nov 22 10:44:45 crc kubenswrapper[4926]: I1122 10:44:45.115507 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c34f3883-c9b2-41fe-9a17-127918e9ef88-utilities\") pod \"c34f3883-c9b2-41fe-9a17-127918e9ef88\" (UID: \"c34f3883-c9b2-41fe-9a17-127918e9ef88\") " Nov 22 10:44:45 crc kubenswrapper[4926]: I1122 10:44:45.115535 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b8674be0-e053-4d83-9a04-008800542315-utilities\") pod \"b8674be0-e053-4d83-9a04-008800542315\" (UID: \"b8674be0-e053-4d83-9a04-008800542315\") " Nov 22 10:44:45 crc kubenswrapper[4926]: I1122 10:44:45.115563 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kgbtd\" (UniqueName: \"kubernetes.io/projected/c34f3883-c9b2-41fe-9a17-127918e9ef88-kube-api-access-kgbtd\") pod \"c34f3883-c9b2-41fe-9a17-127918e9ef88\" (UID: 
\"c34f3883-c9b2-41fe-9a17-127918e9ef88\") " Nov 22 10:44:45 crc kubenswrapper[4926]: I1122 10:44:45.115595 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dl7st\" (UniqueName: \"kubernetes.io/projected/8ea24004-1068-4fad-a694-c92251db240d-kube-api-access-dl7st\") pod \"8ea24004-1068-4fad-a694-c92251db240d\" (UID: \"8ea24004-1068-4fad-a694-c92251db240d\") " Nov 22 10:44:45 crc kubenswrapper[4926]: I1122 10:44:45.115615 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c34f3883-c9b2-41fe-9a17-127918e9ef88-catalog-content\") pod \"c34f3883-c9b2-41fe-9a17-127918e9ef88\" (UID: \"c34f3883-c9b2-41fe-9a17-127918e9ef88\") " Nov 22 10:44:45 crc kubenswrapper[4926]: I1122 10:44:45.115839 4926 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8ea24004-1068-4fad-a694-c92251db240d-utilities\") on node \"crc\" DevicePath \"\"" Nov 22 10:44:45 crc kubenswrapper[4926]: I1122 10:44:45.116703 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c34f3883-c9b2-41fe-9a17-127918e9ef88-utilities" (OuterVolumeSpecName: "utilities") pod "c34f3883-c9b2-41fe-9a17-127918e9ef88" (UID: "c34f3883-c9b2-41fe-9a17-127918e9ef88"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 10:44:45 crc kubenswrapper[4926]: I1122 10:44:45.121059 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b8674be0-e053-4d83-9a04-008800542315-kube-api-access-ms456" (OuterVolumeSpecName: "kube-api-access-ms456") pod "b8674be0-e053-4d83-9a04-008800542315" (UID: "b8674be0-e053-4d83-9a04-008800542315"). InnerVolumeSpecName "kube-api-access-ms456". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:44:45 crc kubenswrapper[4926]: I1122 10:44:45.121149 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c34f3883-c9b2-41fe-9a17-127918e9ef88-kube-api-access-kgbtd" (OuterVolumeSpecName: "kube-api-access-kgbtd") pod "c34f3883-c9b2-41fe-9a17-127918e9ef88" (UID: "c34f3883-c9b2-41fe-9a17-127918e9ef88"). InnerVolumeSpecName "kube-api-access-kgbtd". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:44:45 crc kubenswrapper[4926]: I1122 10:44:45.121220 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8ea24004-1068-4fad-a694-c92251db240d-kube-api-access-dl7st" (OuterVolumeSpecName: "kube-api-access-dl7st") pod "8ea24004-1068-4fad-a694-c92251db240d" (UID: "8ea24004-1068-4fad-a694-c92251db240d"). InnerVolumeSpecName "kube-api-access-dl7st". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:44:45 crc kubenswrapper[4926]: I1122 10:44:45.132718 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b8674be0-e053-4d83-9a04-008800542315-utilities" (OuterVolumeSpecName: "utilities") pod "b8674be0-e053-4d83-9a04-008800542315" (UID: "b8674be0-e053-4d83-9a04-008800542315"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 10:44:45 crc kubenswrapper[4926]: I1122 10:44:45.140491 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8ea24004-1068-4fad-a694-c92251db240d-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "8ea24004-1068-4fad-a694-c92251db240d" (UID: "8ea24004-1068-4fad-a694-c92251db240d"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 10:44:45 crc kubenswrapper[4926]: I1122 10:44:45.173775 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c34f3883-c9b2-41fe-9a17-127918e9ef88-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "c34f3883-c9b2-41fe-9a17-127918e9ef88" (UID: "c34f3883-c9b2-41fe-9a17-127918e9ef88"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 10:44:45 crc kubenswrapper[4926]: I1122 10:44:45.176470 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b8674be0-e053-4d83-9a04-008800542315-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "b8674be0-e053-4d83-9a04-008800542315" (UID: "b8674be0-e053-4d83-9a04-008800542315"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 10:44:45 crc kubenswrapper[4926]: I1122 10:44:45.216349 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4n575\" (UniqueName: \"kubernetes.io/projected/4d60f8f8-4bda-476b-b339-e29ec2713912-kube-api-access-4n575\") pod \"4d60f8f8-4bda-476b-b339-e29ec2713912\" (UID: \"4d60f8f8-4bda-476b-b339-e29ec2713912\") " Nov 22 10:44:45 crc kubenswrapper[4926]: I1122 10:44:45.216421 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4d60f8f8-4bda-476b-b339-e29ec2713912-catalog-content\") pod \"4d60f8f8-4bda-476b-b339-e29ec2713912\" (UID: \"4d60f8f8-4bda-476b-b339-e29ec2713912\") " Nov 22 10:44:45 crc kubenswrapper[4926]: I1122 10:44:45.216494 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4d60f8f8-4bda-476b-b339-e29ec2713912-utilities\") pod \"4d60f8f8-4bda-476b-b339-e29ec2713912\" (UID: \"4d60f8f8-4bda-476b-b339-e29ec2713912\") " Nov 22 10:44:45 crc kubenswrapper[4926]: I1122 10:44:45.216705 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ms456\" (UniqueName: \"kubernetes.io/projected/b8674be0-e053-4d83-9a04-008800542315-kube-api-access-ms456\") on node \"crc\" DevicePath \"\"" Nov 22 10:44:45 crc kubenswrapper[4926]: I1122 10:44:45.216718 4926 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c34f3883-c9b2-41fe-9a17-127918e9ef88-utilities\") on node \"crc\" DevicePath \"\"" Nov 22 10:44:45 crc kubenswrapper[4926]: I1122 10:44:45.216726 4926 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b8674be0-e053-4d83-9a04-008800542315-utilities\") on node \"crc\" DevicePath \"\"" Nov 22 10:44:45 crc kubenswrapper[4926]: I1122 10:44:45.216734 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kgbtd\" (UniqueName: \"kubernetes.io/projected/c34f3883-c9b2-41fe-9a17-127918e9ef88-kube-api-access-kgbtd\") on node \"crc\" DevicePath \"\"" Nov 22 10:44:45 
crc kubenswrapper[4926]: I1122 10:44:45.216745 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dl7st\" (UniqueName: \"kubernetes.io/projected/8ea24004-1068-4fad-a694-c92251db240d-kube-api-access-dl7st\") on node \"crc\" DevicePath \"\"" Nov 22 10:44:45 crc kubenswrapper[4926]: I1122 10:44:45.216753 4926 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c34f3883-c9b2-41fe-9a17-127918e9ef88-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 22 10:44:45 crc kubenswrapper[4926]: I1122 10:44:45.216761 4926 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8ea24004-1068-4fad-a694-c92251db240d-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 22 10:44:45 crc kubenswrapper[4926]: I1122 10:44:45.216777 4926 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b8674be0-e053-4d83-9a04-008800542315-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 22 10:44:45 crc kubenswrapper[4926]: I1122 10:44:45.217323 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4d60f8f8-4bda-476b-b339-e29ec2713912-utilities" (OuterVolumeSpecName: "utilities") pod "4d60f8f8-4bda-476b-b339-e29ec2713912" (UID: "4d60f8f8-4bda-476b-b339-e29ec2713912"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 10:44:45 crc kubenswrapper[4926]: I1122 10:44:45.220109 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4d60f8f8-4bda-476b-b339-e29ec2713912-kube-api-access-4n575" (OuterVolumeSpecName: "kube-api-access-4n575") pod "4d60f8f8-4bda-476b-b339-e29ec2713912" (UID: "4d60f8f8-4bda-476b-b339-e29ec2713912"). InnerVolumeSpecName "kube-api-access-4n575". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:44:45 crc kubenswrapper[4926]: I1122 10:44:45.314100 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4d60f8f8-4bda-476b-b339-e29ec2713912-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "4d60f8f8-4bda-476b-b339-e29ec2713912" (UID: "4d60f8f8-4bda-476b-b339-e29ec2713912"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 10:44:45 crc kubenswrapper[4926]: I1122 10:44:45.317499 4926 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4d60f8f8-4bda-476b-b339-e29ec2713912-utilities\") on node \"crc\" DevicePath \"\"" Nov 22 10:44:45 crc kubenswrapper[4926]: I1122 10:44:45.317531 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4n575\" (UniqueName: \"kubernetes.io/projected/4d60f8f8-4bda-476b-b339-e29ec2713912-kube-api-access-4n575\") on node \"crc\" DevicePath \"\"" Nov 22 10:44:45 crc kubenswrapper[4926]: I1122 10:44:45.317545 4926 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4d60f8f8-4bda-476b-b339-e29ec2713912-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 22 10:44:45 crc kubenswrapper[4926]: I1122 10:44:45.350014 4926 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-cm8rb" Nov 22 10:44:45 crc kubenswrapper[4926]: I1122 10:44:45.418363 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/4943246c-40df-4927-8380-b7d2804a17f7-marketplace-operator-metrics\") pod \"4943246c-40df-4927-8380-b7d2804a17f7\" (UID: \"4943246c-40df-4927-8380-b7d2804a17f7\") " Nov 22 10:44:45 crc kubenswrapper[4926]: I1122 10:44:45.418424 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-st54q\" (UniqueName: \"kubernetes.io/projected/4943246c-40df-4927-8380-b7d2804a17f7-kube-api-access-st54q\") pod \"4943246c-40df-4927-8380-b7d2804a17f7\" (UID: \"4943246c-40df-4927-8380-b7d2804a17f7\") " Nov 22 10:44:45 crc kubenswrapper[4926]: I1122 10:44:45.418489 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/4943246c-40df-4927-8380-b7d2804a17f7-marketplace-trusted-ca\") pod \"4943246c-40df-4927-8380-b7d2804a17f7\" (UID: \"4943246c-40df-4927-8380-b7d2804a17f7\") " Nov 22 10:44:45 crc kubenswrapper[4926]: I1122 10:44:45.419207 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4943246c-40df-4927-8380-b7d2804a17f7-marketplace-trusted-ca" (OuterVolumeSpecName: "marketplace-trusted-ca") pod "4943246c-40df-4927-8380-b7d2804a17f7" (UID: "4943246c-40df-4927-8380-b7d2804a17f7"). InnerVolumeSpecName "marketplace-trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:44:45 crc kubenswrapper[4926]: I1122 10:44:45.419496 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-b7gxf"] Nov 22 10:44:45 crc kubenswrapper[4926]: I1122 10:44:45.421638 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4943246c-40df-4927-8380-b7d2804a17f7-marketplace-operator-metrics" (OuterVolumeSpecName: "marketplace-operator-metrics") pod "4943246c-40df-4927-8380-b7d2804a17f7" (UID: "4943246c-40df-4927-8380-b7d2804a17f7"). InnerVolumeSpecName "marketplace-operator-metrics". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:44:45 crc kubenswrapper[4926]: I1122 10:44:45.421808 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4943246c-40df-4927-8380-b7d2804a17f7-kube-api-access-st54q" (OuterVolumeSpecName: "kube-api-access-st54q") pod "4943246c-40df-4927-8380-b7d2804a17f7" (UID: "4943246c-40df-4927-8380-b7d2804a17f7"). InnerVolumeSpecName "kube-api-access-st54q". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:44:45 crc kubenswrapper[4926]: W1122 10:44:45.427322 4926 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb71cda98_e97f_4b9c_93d9_74c8cabe6420.slice/crio-99d4cc15ed23d153ecb692013fda7d90db178c2a50e0a82f69bfd9bbd8c376a2 WatchSource:0}: Error finding container 99d4cc15ed23d153ecb692013fda7d90db178c2a50e0a82f69bfd9bbd8c376a2: Status 404 returned error can't find the container with id 99d4cc15ed23d153ecb692013fda7d90db178c2a50e0a82f69bfd9bbd8c376a2 Nov 22 10:44:45 crc kubenswrapper[4926]: I1122 10:44:45.520169 4926 reconciler_common.go:293] "Volume detached for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/4943246c-40df-4927-8380-b7d2804a17f7-marketplace-trusted-ca\") on node \"crc\" DevicePath \"\"" Nov 22 10:44:45 crc kubenswrapper[4926]: I1122 10:44:45.520492 4926 reconciler_common.go:293] "Volume detached for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/4943246c-40df-4927-8380-b7d2804a17f7-marketplace-operator-metrics\") on node \"crc\" DevicePath \"\"" Nov 22 10:44:45 crc kubenswrapper[4926]: I1122 10:44:45.520504 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-st54q\" (UniqueName: \"kubernetes.io/projected/4943246c-40df-4927-8380-b7d2804a17f7-kube-api-access-st54q\") on node \"crc\" DevicePath \"\"" Nov 22 10:44:45 crc kubenswrapper[4926]: I1122 10:44:45.680297 4926 generic.go:334] "Generic (PLEG): container finished" podID="c34f3883-c9b2-41fe-9a17-127918e9ef88" containerID="393f64ed1749d16d6219d2bf39288c10d920e6814c6f4c0495b98a6c96e43a9e" exitCode=0 Nov 22 10:44:45 crc kubenswrapper[4926]: I1122 10:44:45.680373 4926 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-4864b" Nov 22 10:44:45 crc kubenswrapper[4926]: I1122 10:44:45.680375 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-4864b" event={"ID":"c34f3883-c9b2-41fe-9a17-127918e9ef88","Type":"ContainerDied","Data":"393f64ed1749d16d6219d2bf39288c10d920e6814c6f4c0495b98a6c96e43a9e"} Nov 22 10:44:45 crc kubenswrapper[4926]: I1122 10:44:45.680487 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-4864b" event={"ID":"c34f3883-c9b2-41fe-9a17-127918e9ef88","Type":"ContainerDied","Data":"a7fbe18a98d07156d38a13e01ae344c65a27aca1b4368360aa4ea9c12051d596"} Nov 22 10:44:45 crc kubenswrapper[4926]: I1122 10:44:45.680526 4926 scope.go:117] "RemoveContainer" containerID="393f64ed1749d16d6219d2bf39288c10d920e6814c6f4c0495b98a6c96e43a9e" Nov 22 10:44:45 crc kubenswrapper[4926]: I1122 10:44:45.682952 4926 generic.go:334] "Generic (PLEG): container finished" podID="8ea24004-1068-4fad-a694-c92251db240d" containerID="96332c28d2eda4da5c6d901aaf32edcdae793cab0d6da57ee12265e08f63055a" exitCode=0 Nov 22 10:44:45 crc kubenswrapper[4926]: I1122 10:44:45.683014 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-x6q97" event={"ID":"8ea24004-1068-4fad-a694-c92251db240d","Type":"ContainerDied","Data":"96332c28d2eda4da5c6d901aaf32edcdae793cab0d6da57ee12265e08f63055a"} Nov 22 10:44:45 crc kubenswrapper[4926]: I1122 10:44:45.683039 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-x6q97" event={"ID":"8ea24004-1068-4fad-a694-c92251db240d","Type":"ContainerDied","Data":"ca376f0daf1c3f8fd8be9865784c76d02be809031927638b3cb37ad948686701"} Nov 22 10:44:45 crc kubenswrapper[4926]: I1122 10:44:45.683111 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-x6q97" Nov 22 10:44:45 crc kubenswrapper[4926]: I1122 10:44:45.690684 4926 generic.go:334] "Generic (PLEG): container finished" podID="4d60f8f8-4bda-476b-b339-e29ec2713912" containerID="f92d85d46dfd567fb9143b6efcdf84645b17902e0749bb7824ecc9a354abb1b6" exitCode=0 Nov 22 10:44:45 crc kubenswrapper[4926]: I1122 10:44:45.690788 4926 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-wxk9m" Nov 22 10:44:45 crc kubenswrapper[4926]: I1122 10:44:45.691408 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wxk9m" event={"ID":"4d60f8f8-4bda-476b-b339-e29ec2713912","Type":"ContainerDied","Data":"f92d85d46dfd567fb9143b6efcdf84645b17902e0749bb7824ecc9a354abb1b6"} Nov 22 10:44:45 crc kubenswrapper[4926]: I1122 10:44:45.691452 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wxk9m" event={"ID":"4d60f8f8-4bda-476b-b339-e29ec2713912","Type":"ContainerDied","Data":"ceaca808fb030ec63ac8dca9d0411d5fb272e9c6f58593f973d59614d181dd85"} Nov 22 10:44:45 crc kubenswrapper[4926]: I1122 10:44:45.693927 4926 scope.go:117] "RemoveContainer" containerID="cf7381fdc23a6a053988e57f7b2f55713ce8af059d871b75461cd977fbf4e377" Nov 22 10:44:45 crc kubenswrapper[4926]: I1122 10:44:45.694033 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-b7gxf" event={"ID":"b71cda98-e97f-4b9c-93d9-74c8cabe6420","Type":"ContainerStarted","Data":"0248db98d8ad43b92673e5b7f24d4c5a4c792fbc902f591c0c35c95ce3749592"} Nov 22 10:44:45 crc kubenswrapper[4926]: I1122 10:44:45.694047 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-b7gxf" event={"ID":"b71cda98-e97f-4b9c-93d9-74c8cabe6420","Type":"ContainerStarted","Data":"99d4cc15ed23d153ecb692013fda7d90db178c2a50e0a82f69bfd9bbd8c376a2"} Nov 22 10:44:45 crc kubenswrapper[4926]: I1122 10:44:45.694398 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/marketplace-operator-79b997595-b7gxf" Nov 22 10:44:45 crc kubenswrapper[4926]: I1122 10:44:45.695952 4926 generic.go:334] "Generic (PLEG): container finished" podID="4943246c-40df-4927-8380-b7d2804a17f7" containerID="fc207104e5a09fb5729483274c5d7f4defae319a8d902df57714a5315bb14397" exitCode=0 Nov 22 10:44:45 crc kubenswrapper[4926]: I1122 10:44:45.695991 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-cm8rb" event={"ID":"4943246c-40df-4927-8380-b7d2804a17f7","Type":"ContainerDied","Data":"fc207104e5a09fb5729483274c5d7f4defae319a8d902df57714a5315bb14397"} Nov 22 10:44:45 crc kubenswrapper[4926]: I1122 10:44:45.696014 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-cm8rb" event={"ID":"4943246c-40df-4927-8380-b7d2804a17f7","Type":"ContainerDied","Data":"393815e471efcaa36bedf893a46f88aabbf3f11a3a275a9986f752c4d3e48211"} Nov 22 10:44:45 crc kubenswrapper[4926]: I1122 10:44:45.696045 4926 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-cm8rb" Nov 22 10:44:45 crc kubenswrapper[4926]: I1122 10:44:45.699339 4926 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-b7gxf container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.56:8080/healthz\": dial tcp 10.217.0.56:8080: connect: connection refused" start-of-body= Nov 22 10:44:45 crc kubenswrapper[4926]: I1122 10:44:45.699425 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-b7gxf" podUID="b71cda98-e97f-4b9c-93d9-74c8cabe6420" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.56:8080/healthz\": dial tcp 10.217.0.56:8080: connect: connection refused" Nov 22 10:44:45 crc kubenswrapper[4926]: I1122 10:44:45.711308 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-c2r2n" event={"ID":"b8674be0-e053-4d83-9a04-008800542315","Type":"ContainerDied","Data":"905ff7d3e0dc94ea6a581890a54d9d0bbc11affe373212c13fe45eb7a602b85a"} Nov 22 10:44:45 crc kubenswrapper[4926]: I1122 10:44:45.711462 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-c2r2n" Nov 22 10:44:45 crc kubenswrapper[4926]: I1122 10:44:45.711715 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/marketplace-operator-79b997595-b7gxf" podStartSLOduration=1.7116969979999999 podStartE2EDuration="1.711696998s" podCreationTimestamp="2025-11-22 10:44:44 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 10:44:45.711359629 +0000 UTC m=+306.012964916" watchObservedRunningTime="2025-11-22 10:44:45.711696998 +0000 UTC m=+306.013302285" Nov 22 10:44:45 crc kubenswrapper[4926]: I1122 10:44:45.721054 4926 scope.go:117] "RemoveContainer" containerID="28a382c1dd49adeffb8b19110b6b6dde0b6cc27eb6eee629873a16e67c9f43e4" Nov 22 10:44:45 crc kubenswrapper[4926]: I1122 10:44:45.741712 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-4864b"] Nov 22 10:44:45 crc kubenswrapper[4926]: I1122 10:44:45.742148 4926 scope.go:117] "RemoveContainer" containerID="393f64ed1749d16d6219d2bf39288c10d920e6814c6f4c0495b98a6c96e43a9e" Nov 22 10:44:45 crc kubenswrapper[4926]: E1122 10:44:45.743487 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"393f64ed1749d16d6219d2bf39288c10d920e6814c6f4c0495b98a6c96e43a9e\": container with ID starting with 393f64ed1749d16d6219d2bf39288c10d920e6814c6f4c0495b98a6c96e43a9e not found: ID does not exist" containerID="393f64ed1749d16d6219d2bf39288c10d920e6814c6f4c0495b98a6c96e43a9e" Nov 22 10:44:45 crc kubenswrapper[4926]: I1122 10:44:45.743520 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"393f64ed1749d16d6219d2bf39288c10d920e6814c6f4c0495b98a6c96e43a9e"} err="failed to get container status \"393f64ed1749d16d6219d2bf39288c10d920e6814c6f4c0495b98a6c96e43a9e\": rpc error: code = NotFound desc = could not find container \"393f64ed1749d16d6219d2bf39288c10d920e6814c6f4c0495b98a6c96e43a9e\": container with ID starting with 393f64ed1749d16d6219d2bf39288c10d920e6814c6f4c0495b98a6c96e43a9e not found: ID does not exist" Nov 22 
10:44:45 crc kubenswrapper[4926]: I1122 10:44:45.743542 4926 scope.go:117] "RemoveContainer" containerID="cf7381fdc23a6a053988e57f7b2f55713ce8af059d871b75461cd977fbf4e377" Nov 22 10:44:45 crc kubenswrapper[4926]: E1122 10:44:45.743925 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"cf7381fdc23a6a053988e57f7b2f55713ce8af059d871b75461cd977fbf4e377\": container with ID starting with cf7381fdc23a6a053988e57f7b2f55713ce8af059d871b75461cd977fbf4e377 not found: ID does not exist" containerID="cf7381fdc23a6a053988e57f7b2f55713ce8af059d871b75461cd977fbf4e377" Nov 22 10:44:45 crc kubenswrapper[4926]: I1122 10:44:45.743955 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cf7381fdc23a6a053988e57f7b2f55713ce8af059d871b75461cd977fbf4e377"} err="failed to get container status \"cf7381fdc23a6a053988e57f7b2f55713ce8af059d871b75461cd977fbf4e377\": rpc error: code = NotFound desc = could not find container \"cf7381fdc23a6a053988e57f7b2f55713ce8af059d871b75461cd977fbf4e377\": container with ID starting with cf7381fdc23a6a053988e57f7b2f55713ce8af059d871b75461cd977fbf4e377 not found: ID does not exist" Nov 22 10:44:45 crc kubenswrapper[4926]: I1122 10:44:45.743980 4926 scope.go:117] "RemoveContainer" containerID="28a382c1dd49adeffb8b19110b6b6dde0b6cc27eb6eee629873a16e67c9f43e4" Nov 22 10:44:45 crc kubenswrapper[4926]: E1122 10:44:45.744238 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"28a382c1dd49adeffb8b19110b6b6dde0b6cc27eb6eee629873a16e67c9f43e4\": container with ID starting with 28a382c1dd49adeffb8b19110b6b6dde0b6cc27eb6eee629873a16e67c9f43e4 not found: ID does not exist" containerID="28a382c1dd49adeffb8b19110b6b6dde0b6cc27eb6eee629873a16e67c9f43e4" Nov 22 10:44:45 crc kubenswrapper[4926]: I1122 10:44:45.744264 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"28a382c1dd49adeffb8b19110b6b6dde0b6cc27eb6eee629873a16e67c9f43e4"} err="failed to get container status \"28a382c1dd49adeffb8b19110b6b6dde0b6cc27eb6eee629873a16e67c9f43e4\": rpc error: code = NotFound desc = could not find container \"28a382c1dd49adeffb8b19110b6b6dde0b6cc27eb6eee629873a16e67c9f43e4\": container with ID starting with 28a382c1dd49adeffb8b19110b6b6dde0b6cc27eb6eee629873a16e67c9f43e4 not found: ID does not exist" Nov 22 10:44:45 crc kubenswrapper[4926]: I1122 10:44:45.744281 4926 scope.go:117] "RemoveContainer" containerID="96332c28d2eda4da5c6d901aaf32edcdae793cab0d6da57ee12265e08f63055a" Nov 22 10:44:45 crc kubenswrapper[4926]: I1122 10:44:45.746006 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-4864b"] Nov 22 10:44:45 crc kubenswrapper[4926]: I1122 10:44:45.757532 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-x6q97"] Nov 22 10:44:45 crc kubenswrapper[4926]: I1122 10:44:45.759550 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-x6q97"] Nov 22 10:44:45 crc kubenswrapper[4926]: I1122 10:44:45.761711 4926 scope.go:117] "RemoveContainer" containerID="8ed11a998a2e548cf89821f9bee25c153847b05a7e90327f94b741f29c415024" Nov 22 10:44:45 crc kubenswrapper[4926]: I1122 10:44:45.770342 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-cm8rb"] Nov 22 10:44:45 crc 
kubenswrapper[4926]: I1122 10:44:45.777418 4926 scope.go:117] "RemoveContainer" containerID="bae30c0089473e138c3f630184a70275adbf84b169ac4de7e18f4cae806a60b1" Nov 22 10:44:45 crc kubenswrapper[4926]: I1122 10:44:45.777615 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-cm8rb"] Nov 22 10:44:45 crc kubenswrapper[4926]: I1122 10:44:45.788821 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-c2r2n"] Nov 22 10:44:45 crc kubenswrapper[4926]: I1122 10:44:45.789590 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-c2r2n"] Nov 22 10:44:45 crc kubenswrapper[4926]: I1122 10:44:45.794259 4926 scope.go:117] "RemoveContainer" containerID="96332c28d2eda4da5c6d901aaf32edcdae793cab0d6da57ee12265e08f63055a" Nov 22 10:44:45 crc kubenswrapper[4926]: E1122 10:44:45.794677 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"96332c28d2eda4da5c6d901aaf32edcdae793cab0d6da57ee12265e08f63055a\": container with ID starting with 96332c28d2eda4da5c6d901aaf32edcdae793cab0d6da57ee12265e08f63055a not found: ID does not exist" containerID="96332c28d2eda4da5c6d901aaf32edcdae793cab0d6da57ee12265e08f63055a" Nov 22 10:44:45 crc kubenswrapper[4926]: I1122 10:44:45.794796 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"96332c28d2eda4da5c6d901aaf32edcdae793cab0d6da57ee12265e08f63055a"} err="failed to get container status \"96332c28d2eda4da5c6d901aaf32edcdae793cab0d6da57ee12265e08f63055a\": rpc error: code = NotFound desc = could not find container \"96332c28d2eda4da5c6d901aaf32edcdae793cab0d6da57ee12265e08f63055a\": container with ID starting with 96332c28d2eda4da5c6d901aaf32edcdae793cab0d6da57ee12265e08f63055a not found: ID does not exist" Nov 22 10:44:45 crc kubenswrapper[4926]: I1122 10:44:45.794894 4926 scope.go:117] "RemoveContainer" containerID="8ed11a998a2e548cf89821f9bee25c153847b05a7e90327f94b741f29c415024" Nov 22 10:44:45 crc kubenswrapper[4926]: E1122 10:44:45.795238 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8ed11a998a2e548cf89821f9bee25c153847b05a7e90327f94b741f29c415024\": container with ID starting with 8ed11a998a2e548cf89821f9bee25c153847b05a7e90327f94b741f29c415024 not found: ID does not exist" containerID="8ed11a998a2e548cf89821f9bee25c153847b05a7e90327f94b741f29c415024" Nov 22 10:44:45 crc kubenswrapper[4926]: I1122 10:44:45.795282 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8ed11a998a2e548cf89821f9bee25c153847b05a7e90327f94b741f29c415024"} err="failed to get container status \"8ed11a998a2e548cf89821f9bee25c153847b05a7e90327f94b741f29c415024\": rpc error: code = NotFound desc = could not find container \"8ed11a998a2e548cf89821f9bee25c153847b05a7e90327f94b741f29c415024\": container with ID starting with 8ed11a998a2e548cf89821f9bee25c153847b05a7e90327f94b741f29c415024 not found: ID does not exist" Nov 22 10:44:45 crc kubenswrapper[4926]: I1122 10:44:45.795319 4926 scope.go:117] "RemoveContainer" containerID="bae30c0089473e138c3f630184a70275adbf84b169ac4de7e18f4cae806a60b1" Nov 22 10:44:45 crc kubenswrapper[4926]: E1122 10:44:45.795986 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container 
\"bae30c0089473e138c3f630184a70275adbf84b169ac4de7e18f4cae806a60b1\": container with ID starting with bae30c0089473e138c3f630184a70275adbf84b169ac4de7e18f4cae806a60b1 not found: ID does not exist" containerID="bae30c0089473e138c3f630184a70275adbf84b169ac4de7e18f4cae806a60b1" Nov 22 10:44:45 crc kubenswrapper[4926]: I1122 10:44:45.796084 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bae30c0089473e138c3f630184a70275adbf84b169ac4de7e18f4cae806a60b1"} err="failed to get container status \"bae30c0089473e138c3f630184a70275adbf84b169ac4de7e18f4cae806a60b1\": rpc error: code = NotFound desc = could not find container \"bae30c0089473e138c3f630184a70275adbf84b169ac4de7e18f4cae806a60b1\": container with ID starting with bae30c0089473e138c3f630184a70275adbf84b169ac4de7e18f4cae806a60b1 not found: ID does not exist" Nov 22 10:44:45 crc kubenswrapper[4926]: I1122 10:44:45.796155 4926 scope.go:117] "RemoveContainer" containerID="f92d85d46dfd567fb9143b6efcdf84645b17902e0749bb7824ecc9a354abb1b6" Nov 22 10:44:45 crc kubenswrapper[4926]: I1122 10:44:45.802722 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-wxk9m"] Nov 22 10:44:45 crc kubenswrapper[4926]: I1122 10:44:45.805145 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-wxk9m"] Nov 22 10:44:45 crc kubenswrapper[4926]: I1122 10:44:45.820897 4926 scope.go:117] "RemoveContainer" containerID="d36386b206b20b7eed7dba918003bf2e6d7a4234d0b635902ba502d4766ac4c4" Nov 22 10:44:45 crc kubenswrapper[4926]: I1122 10:44:45.833572 4926 scope.go:117] "RemoveContainer" containerID="8de13323548baca110d84343130dc7687a45bf6732e90eb10db3246abe80b5f4" Nov 22 10:44:45 crc kubenswrapper[4926]: I1122 10:44:45.844950 4926 scope.go:117] "RemoveContainer" containerID="f92d85d46dfd567fb9143b6efcdf84645b17902e0749bb7824ecc9a354abb1b6" Nov 22 10:44:45 crc kubenswrapper[4926]: E1122 10:44:45.845271 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f92d85d46dfd567fb9143b6efcdf84645b17902e0749bb7824ecc9a354abb1b6\": container with ID starting with f92d85d46dfd567fb9143b6efcdf84645b17902e0749bb7824ecc9a354abb1b6 not found: ID does not exist" containerID="f92d85d46dfd567fb9143b6efcdf84645b17902e0749bb7824ecc9a354abb1b6" Nov 22 10:44:45 crc kubenswrapper[4926]: I1122 10:44:45.845301 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f92d85d46dfd567fb9143b6efcdf84645b17902e0749bb7824ecc9a354abb1b6"} err="failed to get container status \"f92d85d46dfd567fb9143b6efcdf84645b17902e0749bb7824ecc9a354abb1b6\": rpc error: code = NotFound desc = could not find container \"f92d85d46dfd567fb9143b6efcdf84645b17902e0749bb7824ecc9a354abb1b6\": container with ID starting with f92d85d46dfd567fb9143b6efcdf84645b17902e0749bb7824ecc9a354abb1b6 not found: ID does not exist" Nov 22 10:44:45 crc kubenswrapper[4926]: I1122 10:44:45.845324 4926 scope.go:117] "RemoveContainer" containerID="d36386b206b20b7eed7dba918003bf2e6d7a4234d0b635902ba502d4766ac4c4" Nov 22 10:44:45 crc kubenswrapper[4926]: E1122 10:44:45.845758 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d36386b206b20b7eed7dba918003bf2e6d7a4234d0b635902ba502d4766ac4c4\": container with ID starting with d36386b206b20b7eed7dba918003bf2e6d7a4234d0b635902ba502d4766ac4c4 not found: ID does not exist" 
containerID="d36386b206b20b7eed7dba918003bf2e6d7a4234d0b635902ba502d4766ac4c4" Nov 22 10:44:45 crc kubenswrapper[4926]: I1122 10:44:45.845778 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d36386b206b20b7eed7dba918003bf2e6d7a4234d0b635902ba502d4766ac4c4"} err="failed to get container status \"d36386b206b20b7eed7dba918003bf2e6d7a4234d0b635902ba502d4766ac4c4\": rpc error: code = NotFound desc = could not find container \"d36386b206b20b7eed7dba918003bf2e6d7a4234d0b635902ba502d4766ac4c4\": container with ID starting with d36386b206b20b7eed7dba918003bf2e6d7a4234d0b635902ba502d4766ac4c4 not found: ID does not exist" Nov 22 10:44:45 crc kubenswrapper[4926]: I1122 10:44:45.845793 4926 scope.go:117] "RemoveContainer" containerID="8de13323548baca110d84343130dc7687a45bf6732e90eb10db3246abe80b5f4" Nov 22 10:44:45 crc kubenswrapper[4926]: E1122 10:44:45.846135 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8de13323548baca110d84343130dc7687a45bf6732e90eb10db3246abe80b5f4\": container with ID starting with 8de13323548baca110d84343130dc7687a45bf6732e90eb10db3246abe80b5f4 not found: ID does not exist" containerID="8de13323548baca110d84343130dc7687a45bf6732e90eb10db3246abe80b5f4" Nov 22 10:44:45 crc kubenswrapper[4926]: I1122 10:44:45.846239 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8de13323548baca110d84343130dc7687a45bf6732e90eb10db3246abe80b5f4"} err="failed to get container status \"8de13323548baca110d84343130dc7687a45bf6732e90eb10db3246abe80b5f4\": rpc error: code = NotFound desc = could not find container \"8de13323548baca110d84343130dc7687a45bf6732e90eb10db3246abe80b5f4\": container with ID starting with 8de13323548baca110d84343130dc7687a45bf6732e90eb10db3246abe80b5f4 not found: ID does not exist" Nov 22 10:44:45 crc kubenswrapper[4926]: I1122 10:44:45.846323 4926 scope.go:117] "RemoveContainer" containerID="fc207104e5a09fb5729483274c5d7f4defae319a8d902df57714a5315bb14397" Nov 22 10:44:45 crc kubenswrapper[4926]: I1122 10:44:45.860284 4926 scope.go:117] "RemoveContainer" containerID="fc207104e5a09fb5729483274c5d7f4defae319a8d902df57714a5315bb14397" Nov 22 10:44:45 crc kubenswrapper[4926]: E1122 10:44:45.860638 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"fc207104e5a09fb5729483274c5d7f4defae319a8d902df57714a5315bb14397\": container with ID starting with fc207104e5a09fb5729483274c5d7f4defae319a8d902df57714a5315bb14397 not found: ID does not exist" containerID="fc207104e5a09fb5729483274c5d7f4defae319a8d902df57714a5315bb14397" Nov 22 10:44:45 crc kubenswrapper[4926]: I1122 10:44:45.860678 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fc207104e5a09fb5729483274c5d7f4defae319a8d902df57714a5315bb14397"} err="failed to get container status \"fc207104e5a09fb5729483274c5d7f4defae319a8d902df57714a5315bb14397\": rpc error: code = NotFound desc = could not find container \"fc207104e5a09fb5729483274c5d7f4defae319a8d902df57714a5315bb14397\": container with ID starting with fc207104e5a09fb5729483274c5d7f4defae319a8d902df57714a5315bb14397 not found: ID does not exist" Nov 22 10:44:45 crc kubenswrapper[4926]: I1122 10:44:45.860714 4926 scope.go:117] "RemoveContainer" containerID="b59885bcd182306568e87c4aa4abe4a394e491a08fa41e867b4eebd10aecc93c" Nov 22 10:44:45 crc kubenswrapper[4926]: I1122 
10:44:45.872033 4926 scope.go:117] "RemoveContainer" containerID="f53a3f4aff921f5ec3762c205fe1bea56fadf89df3a4f1e13781dcac9dcd1de0" Nov 22 10:44:45 crc kubenswrapper[4926]: I1122 10:44:45.884145 4926 scope.go:117] "RemoveContainer" containerID="ac229eb4924c7030ab410fb7d0377b026c3342b5828e8318068b90be7bff47cf" Nov 22 10:44:46 crc kubenswrapper[4926]: I1122 10:44:46.589721 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4943246c-40df-4927-8380-b7d2804a17f7" path="/var/lib/kubelet/pods/4943246c-40df-4927-8380-b7d2804a17f7/volumes" Nov 22 10:44:46 crc kubenswrapper[4926]: I1122 10:44:46.590443 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4d60f8f8-4bda-476b-b339-e29ec2713912" path="/var/lib/kubelet/pods/4d60f8f8-4bda-476b-b339-e29ec2713912/volumes" Nov 22 10:44:46 crc kubenswrapper[4926]: I1122 10:44:46.591584 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8ea24004-1068-4fad-a694-c92251db240d" path="/var/lib/kubelet/pods/8ea24004-1068-4fad-a694-c92251db240d/volumes" Nov 22 10:44:46 crc kubenswrapper[4926]: I1122 10:44:46.592343 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b8674be0-e053-4d83-9a04-008800542315" path="/var/lib/kubelet/pods/b8674be0-e053-4d83-9a04-008800542315/volumes" Nov 22 10:44:46 crc kubenswrapper[4926]: I1122 10:44:46.593095 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c34f3883-c9b2-41fe-9a17-127918e9ef88" path="/var/lib/kubelet/pods/c34f3883-c9b2-41fe-9a17-127918e9ef88/volumes" Nov 22 10:44:46 crc kubenswrapper[4926]: I1122 10:44:46.724795 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-wlbxq"] Nov 22 10:44:46 crc kubenswrapper[4926]: E1122 10:44:46.725030 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b8674be0-e053-4d83-9a04-008800542315" containerName="extract-content" Nov 22 10:44:46 crc kubenswrapper[4926]: I1122 10:44:46.725046 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="b8674be0-e053-4d83-9a04-008800542315" containerName="extract-content" Nov 22 10:44:46 crc kubenswrapper[4926]: E1122 10:44:46.725059 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4d60f8f8-4bda-476b-b339-e29ec2713912" containerName="extract-utilities" Nov 22 10:44:46 crc kubenswrapper[4926]: I1122 10:44:46.725067 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="4d60f8f8-4bda-476b-b339-e29ec2713912" containerName="extract-utilities" Nov 22 10:44:46 crc kubenswrapper[4926]: E1122 10:44:46.725077 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4d60f8f8-4bda-476b-b339-e29ec2713912" containerName="extract-content" Nov 22 10:44:46 crc kubenswrapper[4926]: I1122 10:44:46.725086 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="4d60f8f8-4bda-476b-b339-e29ec2713912" containerName="extract-content" Nov 22 10:44:46 crc kubenswrapper[4926]: E1122 10:44:46.725098 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8ea24004-1068-4fad-a694-c92251db240d" containerName="registry-server" Nov 22 10:44:46 crc kubenswrapper[4926]: I1122 10:44:46.725107 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="8ea24004-1068-4fad-a694-c92251db240d" containerName="registry-server" Nov 22 10:44:46 crc kubenswrapper[4926]: E1122 10:44:46.725117 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b8674be0-e053-4d83-9a04-008800542315" containerName="extract-utilities" Nov 22 10:44:46 crc 
kubenswrapper[4926]: I1122 10:44:46.725125 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="b8674be0-e053-4d83-9a04-008800542315" containerName="extract-utilities" Nov 22 10:44:46 crc kubenswrapper[4926]: E1122 10:44:46.725132 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4d60f8f8-4bda-476b-b339-e29ec2713912" containerName="registry-server" Nov 22 10:44:46 crc kubenswrapper[4926]: I1122 10:44:46.725138 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="4d60f8f8-4bda-476b-b339-e29ec2713912" containerName="registry-server" Nov 22 10:44:46 crc kubenswrapper[4926]: E1122 10:44:46.725145 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c34f3883-c9b2-41fe-9a17-127918e9ef88" containerName="registry-server" Nov 22 10:44:46 crc kubenswrapper[4926]: I1122 10:44:46.725152 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="c34f3883-c9b2-41fe-9a17-127918e9ef88" containerName="registry-server" Nov 22 10:44:46 crc kubenswrapper[4926]: E1122 10:44:46.725160 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b8674be0-e053-4d83-9a04-008800542315" containerName="registry-server" Nov 22 10:44:46 crc kubenswrapper[4926]: I1122 10:44:46.725166 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="b8674be0-e053-4d83-9a04-008800542315" containerName="registry-server" Nov 22 10:44:46 crc kubenswrapper[4926]: E1122 10:44:46.725174 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8ea24004-1068-4fad-a694-c92251db240d" containerName="extract-utilities" Nov 22 10:44:46 crc kubenswrapper[4926]: I1122 10:44:46.725180 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="8ea24004-1068-4fad-a694-c92251db240d" containerName="extract-utilities" Nov 22 10:44:46 crc kubenswrapper[4926]: E1122 10:44:46.725189 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c34f3883-c9b2-41fe-9a17-127918e9ef88" containerName="extract-utilities" Nov 22 10:44:46 crc kubenswrapper[4926]: I1122 10:44:46.725194 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="c34f3883-c9b2-41fe-9a17-127918e9ef88" containerName="extract-utilities" Nov 22 10:44:46 crc kubenswrapper[4926]: E1122 10:44:46.725200 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4943246c-40df-4927-8380-b7d2804a17f7" containerName="marketplace-operator" Nov 22 10:44:46 crc kubenswrapper[4926]: I1122 10:44:46.725207 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="4943246c-40df-4927-8380-b7d2804a17f7" containerName="marketplace-operator" Nov 22 10:44:46 crc kubenswrapper[4926]: E1122 10:44:46.725216 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8ea24004-1068-4fad-a694-c92251db240d" containerName="extract-content" Nov 22 10:44:46 crc kubenswrapper[4926]: I1122 10:44:46.725222 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="8ea24004-1068-4fad-a694-c92251db240d" containerName="extract-content" Nov 22 10:44:46 crc kubenswrapper[4926]: E1122 10:44:46.725231 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c34f3883-c9b2-41fe-9a17-127918e9ef88" containerName="extract-content" Nov 22 10:44:46 crc kubenswrapper[4926]: I1122 10:44:46.725236 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="c34f3883-c9b2-41fe-9a17-127918e9ef88" containerName="extract-content" Nov 22 10:44:46 crc kubenswrapper[4926]: I1122 10:44:46.725308 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="b8674be0-e053-4d83-9a04-008800542315" 
containerName="registry-server" Nov 22 10:44:46 crc kubenswrapper[4926]: I1122 10:44:46.725316 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="4d60f8f8-4bda-476b-b339-e29ec2713912" containerName="registry-server" Nov 22 10:44:46 crc kubenswrapper[4926]: I1122 10:44:46.725328 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="4943246c-40df-4927-8380-b7d2804a17f7" containerName="marketplace-operator" Nov 22 10:44:46 crc kubenswrapper[4926]: I1122 10:44:46.725336 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="8ea24004-1068-4fad-a694-c92251db240d" containerName="registry-server" Nov 22 10:44:46 crc kubenswrapper[4926]: I1122 10:44:46.725343 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="c34f3883-c9b2-41fe-9a17-127918e9ef88" containerName="registry-server" Nov 22 10:44:46 crc kubenswrapper[4926]: I1122 10:44:46.726026 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-wlbxq" Nov 22 10:44:46 crc kubenswrapper[4926]: I1122 10:44:46.727641 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"certified-operators-dockercfg-4rs5g" Nov 22 10:44:46 crc kubenswrapper[4926]: I1122 10:44:46.732925 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/marketplace-operator-79b997595-b7gxf" Nov 22 10:44:46 crc kubenswrapper[4926]: I1122 10:44:46.744980 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-wlbxq"] Nov 22 10:44:46 crc kubenswrapper[4926]: I1122 10:44:46.840470 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/800e7d7e-7580-482b-bf81-728557492bcd-catalog-content\") pod \"certified-operators-wlbxq\" (UID: \"800e7d7e-7580-482b-bf81-728557492bcd\") " pod="openshift-marketplace/certified-operators-wlbxq" Nov 22 10:44:46 crc kubenswrapper[4926]: I1122 10:44:46.840537 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6kgwz\" (UniqueName: \"kubernetes.io/projected/800e7d7e-7580-482b-bf81-728557492bcd-kube-api-access-6kgwz\") pod \"certified-operators-wlbxq\" (UID: \"800e7d7e-7580-482b-bf81-728557492bcd\") " pod="openshift-marketplace/certified-operators-wlbxq" Nov 22 10:44:46 crc kubenswrapper[4926]: I1122 10:44:46.840561 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/800e7d7e-7580-482b-bf81-728557492bcd-utilities\") pod \"certified-operators-wlbxq\" (UID: \"800e7d7e-7580-482b-bf81-728557492bcd\") " pod="openshift-marketplace/certified-operators-wlbxq" Nov 22 10:44:46 crc kubenswrapper[4926]: I1122 10:44:46.924022 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-szcbp"] Nov 22 10:44:46 crc kubenswrapper[4926]: I1122 10:44:46.925666 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-szcbp" Nov 22 10:44:46 crc kubenswrapper[4926]: I1122 10:44:46.928578 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-marketplace-dockercfg-x2ctb" Nov 22 10:44:46 crc kubenswrapper[4926]: I1122 10:44:46.934006 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-szcbp"] Nov 22 10:44:46 crc kubenswrapper[4926]: I1122 10:44:46.961694 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/800e7d7e-7580-482b-bf81-728557492bcd-catalog-content\") pod \"certified-operators-wlbxq\" (UID: \"800e7d7e-7580-482b-bf81-728557492bcd\") " pod="openshift-marketplace/certified-operators-wlbxq" Nov 22 10:44:46 crc kubenswrapper[4926]: I1122 10:44:46.961768 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6kgwz\" (UniqueName: \"kubernetes.io/projected/800e7d7e-7580-482b-bf81-728557492bcd-kube-api-access-6kgwz\") pod \"certified-operators-wlbxq\" (UID: \"800e7d7e-7580-482b-bf81-728557492bcd\") " pod="openshift-marketplace/certified-operators-wlbxq" Nov 22 10:44:46 crc kubenswrapper[4926]: I1122 10:44:46.961799 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/800e7d7e-7580-482b-bf81-728557492bcd-utilities\") pod \"certified-operators-wlbxq\" (UID: \"800e7d7e-7580-482b-bf81-728557492bcd\") " pod="openshift-marketplace/certified-operators-wlbxq" Nov 22 10:44:46 crc kubenswrapper[4926]: I1122 10:44:46.962169 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/800e7d7e-7580-482b-bf81-728557492bcd-catalog-content\") pod \"certified-operators-wlbxq\" (UID: \"800e7d7e-7580-482b-bf81-728557492bcd\") " pod="openshift-marketplace/certified-operators-wlbxq" Nov 22 10:44:46 crc kubenswrapper[4926]: I1122 10:44:46.962280 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/800e7d7e-7580-482b-bf81-728557492bcd-utilities\") pod \"certified-operators-wlbxq\" (UID: \"800e7d7e-7580-482b-bf81-728557492bcd\") " pod="openshift-marketplace/certified-operators-wlbxq" Nov 22 10:44:46 crc kubenswrapper[4926]: I1122 10:44:46.981737 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6kgwz\" (UniqueName: \"kubernetes.io/projected/800e7d7e-7580-482b-bf81-728557492bcd-kube-api-access-6kgwz\") pod \"certified-operators-wlbxq\" (UID: \"800e7d7e-7580-482b-bf81-728557492bcd\") " pod="openshift-marketplace/certified-operators-wlbxq" Nov 22 10:44:47 crc kubenswrapper[4926]: I1122 10:44:47.043819 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-wlbxq" Nov 22 10:44:47 crc kubenswrapper[4926]: I1122 10:44:47.063228 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1d1eac02-20b5-4cfc-add3-a5f9d687455b-utilities\") pod \"redhat-marketplace-szcbp\" (UID: \"1d1eac02-20b5-4cfc-add3-a5f9d687455b\") " pod="openshift-marketplace/redhat-marketplace-szcbp" Nov 22 10:44:47 crc kubenswrapper[4926]: I1122 10:44:47.063275 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1d1eac02-20b5-4cfc-add3-a5f9d687455b-catalog-content\") pod \"redhat-marketplace-szcbp\" (UID: \"1d1eac02-20b5-4cfc-add3-a5f9d687455b\") " pod="openshift-marketplace/redhat-marketplace-szcbp" Nov 22 10:44:47 crc kubenswrapper[4926]: I1122 10:44:47.063325 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-54zvg\" (UniqueName: \"kubernetes.io/projected/1d1eac02-20b5-4cfc-add3-a5f9d687455b-kube-api-access-54zvg\") pod \"redhat-marketplace-szcbp\" (UID: \"1d1eac02-20b5-4cfc-add3-a5f9d687455b\") " pod="openshift-marketplace/redhat-marketplace-szcbp" Nov 22 10:44:47 crc kubenswrapper[4926]: I1122 10:44:47.164802 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1d1eac02-20b5-4cfc-add3-a5f9d687455b-utilities\") pod \"redhat-marketplace-szcbp\" (UID: \"1d1eac02-20b5-4cfc-add3-a5f9d687455b\") " pod="openshift-marketplace/redhat-marketplace-szcbp" Nov 22 10:44:47 crc kubenswrapper[4926]: I1122 10:44:47.165144 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1d1eac02-20b5-4cfc-add3-a5f9d687455b-catalog-content\") pod \"redhat-marketplace-szcbp\" (UID: \"1d1eac02-20b5-4cfc-add3-a5f9d687455b\") " pod="openshift-marketplace/redhat-marketplace-szcbp" Nov 22 10:44:47 crc kubenswrapper[4926]: I1122 10:44:47.165218 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-54zvg\" (UniqueName: \"kubernetes.io/projected/1d1eac02-20b5-4cfc-add3-a5f9d687455b-kube-api-access-54zvg\") pod \"redhat-marketplace-szcbp\" (UID: \"1d1eac02-20b5-4cfc-add3-a5f9d687455b\") " pod="openshift-marketplace/redhat-marketplace-szcbp" Nov 22 10:44:47 crc kubenswrapper[4926]: I1122 10:44:47.165319 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1d1eac02-20b5-4cfc-add3-a5f9d687455b-utilities\") pod \"redhat-marketplace-szcbp\" (UID: \"1d1eac02-20b5-4cfc-add3-a5f9d687455b\") " pod="openshift-marketplace/redhat-marketplace-szcbp" Nov 22 10:44:47 crc kubenswrapper[4926]: I1122 10:44:47.165493 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1d1eac02-20b5-4cfc-add3-a5f9d687455b-catalog-content\") pod \"redhat-marketplace-szcbp\" (UID: \"1d1eac02-20b5-4cfc-add3-a5f9d687455b\") " pod="openshift-marketplace/redhat-marketplace-szcbp" Nov 22 10:44:47 crc kubenswrapper[4926]: I1122 10:44:47.184872 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-54zvg\" (UniqueName: \"kubernetes.io/projected/1d1eac02-20b5-4cfc-add3-a5f9d687455b-kube-api-access-54zvg\") pod 
\"redhat-marketplace-szcbp\" (UID: \"1d1eac02-20b5-4cfc-add3-a5f9d687455b\") " pod="openshift-marketplace/redhat-marketplace-szcbp" Nov 22 10:44:47 crc kubenswrapper[4926]: I1122 10:44:47.215486 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-wlbxq"] Nov 22 10:44:47 crc kubenswrapper[4926]: I1122 10:44:47.271708 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-szcbp" Nov 22 10:44:47 crc kubenswrapper[4926]: I1122 10:44:47.450157 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-szcbp"] Nov 22 10:44:47 crc kubenswrapper[4926]: W1122 10:44:47.458817 4926 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod1d1eac02_20b5_4cfc_add3_a5f9d687455b.slice/crio-b43098aa3a827aa47e7fc3bdce9ab4280ae21e79cb88361dac2817a3a6a8dbed WatchSource:0}: Error finding container b43098aa3a827aa47e7fc3bdce9ab4280ae21e79cb88361dac2817a3a6a8dbed: Status 404 returned error can't find the container with id b43098aa3a827aa47e7fc3bdce9ab4280ae21e79cb88361dac2817a3a6a8dbed Nov 22 10:44:47 crc kubenswrapper[4926]: I1122 10:44:47.735605 4926 generic.go:334] "Generic (PLEG): container finished" podID="800e7d7e-7580-482b-bf81-728557492bcd" containerID="29f6314f10d8a79d128e709ad2ddd3cb64e7d2aae565e07cb04af18780c9b061" exitCode=0 Nov 22 10:44:47 crc kubenswrapper[4926]: I1122 10:44:47.735683 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-wlbxq" event={"ID":"800e7d7e-7580-482b-bf81-728557492bcd","Type":"ContainerDied","Data":"29f6314f10d8a79d128e709ad2ddd3cb64e7d2aae565e07cb04af18780c9b061"} Nov 22 10:44:47 crc kubenswrapper[4926]: I1122 10:44:47.735711 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-wlbxq" event={"ID":"800e7d7e-7580-482b-bf81-728557492bcd","Type":"ContainerStarted","Data":"8443c2e13c7becd29d64ddc6a491169e07c1d640d1931a558f7272caa7661727"} Nov 22 10:44:47 crc kubenswrapper[4926]: I1122 10:44:47.738185 4926 generic.go:334] "Generic (PLEG): container finished" podID="1d1eac02-20b5-4cfc-add3-a5f9d687455b" containerID="ea78528fe3ce21d49dc326c15320c89ce11e8bec6fec01df90699965acf5362b" exitCode=0 Nov 22 10:44:47 crc kubenswrapper[4926]: I1122 10:44:47.738332 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-szcbp" event={"ID":"1d1eac02-20b5-4cfc-add3-a5f9d687455b","Type":"ContainerDied","Data":"ea78528fe3ce21d49dc326c15320c89ce11e8bec6fec01df90699965acf5362b"} Nov 22 10:44:47 crc kubenswrapper[4926]: I1122 10:44:47.738369 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-szcbp" event={"ID":"1d1eac02-20b5-4cfc-add3-a5f9d687455b","Type":"ContainerStarted","Data":"b43098aa3a827aa47e7fc3bdce9ab4280ae21e79cb88361dac2817a3a6a8dbed"} Nov 22 10:44:48 crc kubenswrapper[4926]: I1122 10:44:48.747081 4926 generic.go:334] "Generic (PLEG): container finished" podID="800e7d7e-7580-482b-bf81-728557492bcd" containerID="5d4b8e050a60361cd02458d060ffba6d99058a141528b9afc6ae84fb8d2add9c" exitCode=0 Nov 22 10:44:48 crc kubenswrapper[4926]: I1122 10:44:48.747118 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-wlbxq" 
event={"ID":"800e7d7e-7580-482b-bf81-728557492bcd","Type":"ContainerDied","Data":"5d4b8e050a60361cd02458d060ffba6d99058a141528b9afc6ae84fb8d2add9c"} Nov 22 10:44:48 crc kubenswrapper[4926]: I1122 10:44:48.752624 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-szcbp" event={"ID":"1d1eac02-20b5-4cfc-add3-a5f9d687455b","Type":"ContainerDied","Data":"acc43921b1267de921a456b8481ffcd0f8ab2863e16aec19130947f194b501c1"} Nov 22 10:44:48 crc kubenswrapper[4926]: I1122 10:44:48.752507 4926 generic.go:334] "Generic (PLEG): container finished" podID="1d1eac02-20b5-4cfc-add3-a5f9d687455b" containerID="acc43921b1267de921a456b8481ffcd0f8ab2863e16aec19130947f194b501c1" exitCode=0 Nov 22 10:44:49 crc kubenswrapper[4926]: I1122 10:44:49.124408 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-v4wgk"] Nov 22 10:44:49 crc kubenswrapper[4926]: I1122 10:44:49.125267 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-v4wgk" Nov 22 10:44:49 crc kubenswrapper[4926]: I1122 10:44:49.131703 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-operators-dockercfg-ct8rh" Nov 22 10:44:49 crc kubenswrapper[4926]: I1122 10:44:49.136590 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-v4wgk"] Nov 22 10:44:49 crc kubenswrapper[4926]: I1122 10:44:49.293238 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/17936bf0-470e-4bc3-aa1d-28727f066d93-catalog-content\") pod \"redhat-operators-v4wgk\" (UID: \"17936bf0-470e-4bc3-aa1d-28727f066d93\") " pod="openshift-marketplace/redhat-operators-v4wgk" Nov 22 10:44:49 crc kubenswrapper[4926]: I1122 10:44:49.293768 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jngv2\" (UniqueName: \"kubernetes.io/projected/17936bf0-470e-4bc3-aa1d-28727f066d93-kube-api-access-jngv2\") pod \"redhat-operators-v4wgk\" (UID: \"17936bf0-470e-4bc3-aa1d-28727f066d93\") " pod="openshift-marketplace/redhat-operators-v4wgk" Nov 22 10:44:49 crc kubenswrapper[4926]: I1122 10:44:49.293840 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/17936bf0-470e-4bc3-aa1d-28727f066d93-utilities\") pod \"redhat-operators-v4wgk\" (UID: \"17936bf0-470e-4bc3-aa1d-28727f066d93\") " pod="openshift-marketplace/redhat-operators-v4wgk" Nov 22 10:44:49 crc kubenswrapper[4926]: I1122 10:44:49.329212 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-bpmk4"] Nov 22 10:44:49 crc kubenswrapper[4926]: I1122 10:44:49.330527 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-bpmk4" Nov 22 10:44:49 crc kubenswrapper[4926]: I1122 10:44:49.332822 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"community-operators-dockercfg-dmngl" Nov 22 10:44:49 crc kubenswrapper[4926]: I1122 10:44:49.345715 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-bpmk4"] Nov 22 10:44:49 crc kubenswrapper[4926]: I1122 10:44:49.394998 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/17936bf0-470e-4bc3-aa1d-28727f066d93-utilities\") pod \"redhat-operators-v4wgk\" (UID: \"17936bf0-470e-4bc3-aa1d-28727f066d93\") " pod="openshift-marketplace/redhat-operators-v4wgk" Nov 22 10:44:49 crc kubenswrapper[4926]: I1122 10:44:49.395090 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/17936bf0-470e-4bc3-aa1d-28727f066d93-catalog-content\") pod \"redhat-operators-v4wgk\" (UID: \"17936bf0-470e-4bc3-aa1d-28727f066d93\") " pod="openshift-marketplace/redhat-operators-v4wgk" Nov 22 10:44:49 crc kubenswrapper[4926]: I1122 10:44:49.395130 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jngv2\" (UniqueName: \"kubernetes.io/projected/17936bf0-470e-4bc3-aa1d-28727f066d93-kube-api-access-jngv2\") pod \"redhat-operators-v4wgk\" (UID: \"17936bf0-470e-4bc3-aa1d-28727f066d93\") " pod="openshift-marketplace/redhat-operators-v4wgk" Nov 22 10:44:49 crc kubenswrapper[4926]: I1122 10:44:49.395781 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/17936bf0-470e-4bc3-aa1d-28727f066d93-catalog-content\") pod \"redhat-operators-v4wgk\" (UID: \"17936bf0-470e-4bc3-aa1d-28727f066d93\") " pod="openshift-marketplace/redhat-operators-v4wgk" Nov 22 10:44:49 crc kubenswrapper[4926]: I1122 10:44:49.396179 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/17936bf0-470e-4bc3-aa1d-28727f066d93-utilities\") pod \"redhat-operators-v4wgk\" (UID: \"17936bf0-470e-4bc3-aa1d-28727f066d93\") " pod="openshift-marketplace/redhat-operators-v4wgk" Nov 22 10:44:49 crc kubenswrapper[4926]: I1122 10:44:49.422656 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jngv2\" (UniqueName: \"kubernetes.io/projected/17936bf0-470e-4bc3-aa1d-28727f066d93-kube-api-access-jngv2\") pod \"redhat-operators-v4wgk\" (UID: \"17936bf0-470e-4bc3-aa1d-28727f066d93\") " pod="openshift-marketplace/redhat-operators-v4wgk" Nov 22 10:44:49 crc kubenswrapper[4926]: I1122 10:44:49.446768 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-v4wgk" Nov 22 10:44:49 crc kubenswrapper[4926]: I1122 10:44:49.496259 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/09054c3e-6dab-449c-a04e-1ba78e281575-catalog-content\") pod \"community-operators-bpmk4\" (UID: \"09054c3e-6dab-449c-a04e-1ba78e281575\") " pod="openshift-marketplace/community-operators-bpmk4" Nov 22 10:44:49 crc kubenswrapper[4926]: I1122 10:44:49.496386 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/09054c3e-6dab-449c-a04e-1ba78e281575-utilities\") pod \"community-operators-bpmk4\" (UID: \"09054c3e-6dab-449c-a04e-1ba78e281575\") " pod="openshift-marketplace/community-operators-bpmk4" Nov 22 10:44:49 crc kubenswrapper[4926]: I1122 10:44:49.496440 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-h5mcf\" (UniqueName: \"kubernetes.io/projected/09054c3e-6dab-449c-a04e-1ba78e281575-kube-api-access-h5mcf\") pod \"community-operators-bpmk4\" (UID: \"09054c3e-6dab-449c-a04e-1ba78e281575\") " pod="openshift-marketplace/community-operators-bpmk4" Nov 22 10:44:49 crc kubenswrapper[4926]: I1122 10:44:49.597504 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-h5mcf\" (UniqueName: \"kubernetes.io/projected/09054c3e-6dab-449c-a04e-1ba78e281575-kube-api-access-h5mcf\") pod \"community-operators-bpmk4\" (UID: \"09054c3e-6dab-449c-a04e-1ba78e281575\") " pod="openshift-marketplace/community-operators-bpmk4" Nov 22 10:44:49 crc kubenswrapper[4926]: I1122 10:44:49.598079 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/09054c3e-6dab-449c-a04e-1ba78e281575-catalog-content\") pod \"community-operators-bpmk4\" (UID: \"09054c3e-6dab-449c-a04e-1ba78e281575\") " pod="openshift-marketplace/community-operators-bpmk4" Nov 22 10:44:49 crc kubenswrapper[4926]: I1122 10:44:49.598158 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/09054c3e-6dab-449c-a04e-1ba78e281575-utilities\") pod \"community-operators-bpmk4\" (UID: \"09054c3e-6dab-449c-a04e-1ba78e281575\") " pod="openshift-marketplace/community-operators-bpmk4" Nov 22 10:44:49 crc kubenswrapper[4926]: I1122 10:44:49.598628 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/09054c3e-6dab-449c-a04e-1ba78e281575-utilities\") pod \"community-operators-bpmk4\" (UID: \"09054c3e-6dab-449c-a04e-1ba78e281575\") " pod="openshift-marketplace/community-operators-bpmk4" Nov 22 10:44:49 crc kubenswrapper[4926]: I1122 10:44:49.598868 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/09054c3e-6dab-449c-a04e-1ba78e281575-catalog-content\") pod \"community-operators-bpmk4\" (UID: \"09054c3e-6dab-449c-a04e-1ba78e281575\") " pod="openshift-marketplace/community-operators-bpmk4" Nov 22 10:44:49 crc kubenswrapper[4926]: I1122 10:44:49.621752 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-h5mcf\" (UniqueName: \"kubernetes.io/projected/09054c3e-6dab-449c-a04e-1ba78e281575-kube-api-access-h5mcf\") pod 
\"community-operators-bpmk4\" (UID: \"09054c3e-6dab-449c-a04e-1ba78e281575\") " pod="openshift-marketplace/community-operators-bpmk4" Nov 22 10:44:49 crc kubenswrapper[4926]: I1122 10:44:49.632503 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-v4wgk"] Nov 22 10:44:49 crc kubenswrapper[4926]: W1122 10:44:49.638876 4926 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod17936bf0_470e_4bc3_aa1d_28727f066d93.slice/crio-73edee372131721b9006b876e68346cefb3a8b5707a81ec0c28dac6c2a2621a9 WatchSource:0}: Error finding container 73edee372131721b9006b876e68346cefb3a8b5707a81ec0c28dac6c2a2621a9: Status 404 returned error can't find the container with id 73edee372131721b9006b876e68346cefb3a8b5707a81ec0c28dac6c2a2621a9 Nov 22 10:44:49 crc kubenswrapper[4926]: I1122 10:44:49.653083 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-bpmk4" Nov 22 10:44:49 crc kubenswrapper[4926]: I1122 10:44:49.767356 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-wlbxq" event={"ID":"800e7d7e-7580-482b-bf81-728557492bcd","Type":"ContainerStarted","Data":"931104c24711f84dc74b71d24dac89e26c3d340f032807507053a8b38b6d7650"} Nov 22 10:44:49 crc kubenswrapper[4926]: I1122 10:44:49.772910 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-v4wgk" event={"ID":"17936bf0-470e-4bc3-aa1d-28727f066d93","Type":"ContainerStarted","Data":"73edee372131721b9006b876e68346cefb3a8b5707a81ec0c28dac6c2a2621a9"} Nov 22 10:44:49 crc kubenswrapper[4926]: I1122 10:44:49.775225 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-szcbp" event={"ID":"1d1eac02-20b5-4cfc-add3-a5f9d687455b","Type":"ContainerStarted","Data":"cfe66916e702db7f054324131ddc570e9d10e16c672625adde40bb77132c9622"} Nov 22 10:44:49 crc kubenswrapper[4926]: I1122 10:44:49.787310 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-wlbxq" podStartSLOduration=2.398098718 podStartE2EDuration="3.787289976s" podCreationTimestamp="2025-11-22 10:44:46 +0000 UTC" firstStartedPulling="2025-11-22 10:44:47.736919617 +0000 UTC m=+308.038524904" lastFinishedPulling="2025-11-22 10:44:49.126110865 +0000 UTC m=+309.427716162" observedRunningTime="2025-11-22 10:44:49.78533726 +0000 UTC m=+310.086942547" watchObservedRunningTime="2025-11-22 10:44:49.787289976 +0000 UTC m=+310.088895263" Nov 22 10:44:49 crc kubenswrapper[4926]: I1122 10:44:49.842339 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-szcbp" podStartSLOduration=2.404654386 podStartE2EDuration="3.842320472s" podCreationTimestamp="2025-11-22 10:44:46 +0000 UTC" firstStartedPulling="2025-11-22 10:44:47.743073883 +0000 UTC m=+308.044679170" lastFinishedPulling="2025-11-22 10:44:49.180739969 +0000 UTC m=+309.482345256" observedRunningTime="2025-11-22 10:44:49.806206348 +0000 UTC m=+310.107811635" watchObservedRunningTime="2025-11-22 10:44:49.842320472 +0000 UTC m=+310.143925769" Nov 22 10:44:49 crc kubenswrapper[4926]: I1122 10:44:49.843928 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-bpmk4"] Nov 22 10:44:49 crc kubenswrapper[4926]: W1122 10:44:49.856117 4926 manager.go:1169] Failed to process watch event 
{EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod09054c3e_6dab_449c_a04e_1ba78e281575.slice/crio-d7479ae6d982f66a0f8534f6f349f3b714e32a8265152dae1b8f489ff376dec8 WatchSource:0}: Error finding container d7479ae6d982f66a0f8534f6f349f3b714e32a8265152dae1b8f489ff376dec8: Status 404 returned error can't find the container with id d7479ae6d982f66a0f8534f6f349f3b714e32a8265152dae1b8f489ff376dec8 Nov 22 10:44:50 crc kubenswrapper[4926]: I1122 10:44:50.780848 4926 generic.go:334] "Generic (PLEG): container finished" podID="09054c3e-6dab-449c-a04e-1ba78e281575" containerID="82e359b16612b7fa7dfd9bb40b35a8805164c748e98b9e71bd6bb7c43d5cd3b2" exitCode=0 Nov 22 10:44:50 crc kubenswrapper[4926]: I1122 10:44:50.780929 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-bpmk4" event={"ID":"09054c3e-6dab-449c-a04e-1ba78e281575","Type":"ContainerDied","Data":"82e359b16612b7fa7dfd9bb40b35a8805164c748e98b9e71bd6bb7c43d5cd3b2"} Nov 22 10:44:50 crc kubenswrapper[4926]: I1122 10:44:50.781474 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-bpmk4" event={"ID":"09054c3e-6dab-449c-a04e-1ba78e281575","Type":"ContainerStarted","Data":"d7479ae6d982f66a0f8534f6f349f3b714e32a8265152dae1b8f489ff376dec8"} Nov 22 10:44:50 crc kubenswrapper[4926]: I1122 10:44:50.783111 4926 generic.go:334] "Generic (PLEG): container finished" podID="17936bf0-470e-4bc3-aa1d-28727f066d93" containerID="a5c7c2c9aab9b519bd4144c843f46b8a76a70db89fe11ee490e2558e1c1b2806" exitCode=0 Nov 22 10:44:50 crc kubenswrapper[4926]: I1122 10:44:50.783206 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-v4wgk" event={"ID":"17936bf0-470e-4bc3-aa1d-28727f066d93","Type":"ContainerDied","Data":"a5c7c2c9aab9b519bd4144c843f46b8a76a70db89fe11ee490e2558e1c1b2806"} Nov 22 10:44:51 crc kubenswrapper[4926]: I1122 10:44:51.791478 4926 generic.go:334] "Generic (PLEG): container finished" podID="09054c3e-6dab-449c-a04e-1ba78e281575" containerID="cb5daed5a0dc540a0933c5fcfca36f9cf69271f6b7e485775c550bdc6e467ca3" exitCode=0 Nov 22 10:44:51 crc kubenswrapper[4926]: I1122 10:44:51.792058 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-bpmk4" event={"ID":"09054c3e-6dab-449c-a04e-1ba78e281575","Type":"ContainerDied","Data":"cb5daed5a0dc540a0933c5fcfca36f9cf69271f6b7e485775c550bdc6e467ca3"} Nov 22 10:44:51 crc kubenswrapper[4926]: I1122 10:44:51.796803 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-v4wgk" event={"ID":"17936bf0-470e-4bc3-aa1d-28727f066d93","Type":"ContainerStarted","Data":"3fc4fa9f93ab6d970165b01ab3b971d3848b490f5047376c51de3ef02bbf5a46"} Nov 22 10:44:52 crc kubenswrapper[4926]: I1122 10:44:52.804749 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-bpmk4" event={"ID":"09054c3e-6dab-449c-a04e-1ba78e281575","Type":"ContainerStarted","Data":"059bdc5c8334235ff1df1b2a9636894ea47ec539bea6a8c3f573faebded23813"} Nov 22 10:44:52 crc kubenswrapper[4926]: I1122 10:44:52.813470 4926 generic.go:334] "Generic (PLEG): container finished" podID="17936bf0-470e-4bc3-aa1d-28727f066d93" containerID="3fc4fa9f93ab6d970165b01ab3b971d3848b490f5047376c51de3ef02bbf5a46" exitCode=0 Nov 22 10:44:52 crc kubenswrapper[4926]: I1122 10:44:52.813520 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-v4wgk" 
event={"ID":"17936bf0-470e-4bc3-aa1d-28727f066d93","Type":"ContainerDied","Data":"3fc4fa9f93ab6d970165b01ab3b971d3848b490f5047376c51de3ef02bbf5a46"} Nov 22 10:44:52 crc kubenswrapper[4926]: I1122 10:44:52.825691 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-bpmk4" podStartSLOduration=2.422810758 podStartE2EDuration="3.825675787s" podCreationTimestamp="2025-11-22 10:44:49 +0000 UTC" firstStartedPulling="2025-11-22 10:44:50.785335414 +0000 UTC m=+311.086940701" lastFinishedPulling="2025-11-22 10:44:52.188200443 +0000 UTC m=+312.489805730" observedRunningTime="2025-11-22 10:44:52.822872886 +0000 UTC m=+313.124478203" watchObservedRunningTime="2025-11-22 10:44:52.825675787 +0000 UTC m=+313.127281074" Nov 22 10:44:54 crc kubenswrapper[4926]: I1122 10:44:54.827049 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-v4wgk" event={"ID":"17936bf0-470e-4bc3-aa1d-28727f066d93","Type":"ContainerStarted","Data":"3759a4d873eaedef2f40819f8921e79acef5047682b4163440acd62a216c9781"} Nov 22 10:44:54 crc kubenswrapper[4926]: I1122 10:44:54.855185 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-v4wgk" podStartSLOduration=3.311428212 podStartE2EDuration="5.855167768s" podCreationTimestamp="2025-11-22 10:44:49 +0000 UTC" firstStartedPulling="2025-11-22 10:44:50.785028385 +0000 UTC m=+311.086633672" lastFinishedPulling="2025-11-22 10:44:53.328767931 +0000 UTC m=+313.630373228" observedRunningTime="2025-11-22 10:44:54.853275694 +0000 UTC m=+315.154880981" watchObservedRunningTime="2025-11-22 10:44:54.855167768 +0000 UTC m=+315.156773055" Nov 22 10:44:57 crc kubenswrapper[4926]: I1122 10:44:57.044429 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-wlbxq" Nov 22 10:44:57 crc kubenswrapper[4926]: I1122 10:44:57.046034 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-wlbxq" Nov 22 10:44:57 crc kubenswrapper[4926]: I1122 10:44:57.113126 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-wlbxq" Nov 22 10:44:57 crc kubenswrapper[4926]: I1122 10:44:57.271973 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-szcbp" Nov 22 10:44:57 crc kubenswrapper[4926]: I1122 10:44:57.272056 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-szcbp" Nov 22 10:44:57 crc kubenswrapper[4926]: I1122 10:44:57.309924 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-szcbp" Nov 22 10:44:57 crc kubenswrapper[4926]: I1122 10:44:57.895529 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-szcbp" Nov 22 10:44:57 crc kubenswrapper[4926]: I1122 10:44:57.903091 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-wlbxq" Nov 22 10:44:59 crc kubenswrapper[4926]: I1122 10:44:59.447865 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-v4wgk" Nov 22 10:44:59 crc kubenswrapper[4926]: I1122 10:44:59.448280 4926 kubelet.go:2542] "SyncLoop (probe)" 
probe="readiness" status="" pod="openshift-marketplace/redhat-operators-v4wgk" Nov 22 10:44:59 crc kubenswrapper[4926]: I1122 10:44:59.654086 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-bpmk4" Nov 22 10:44:59 crc kubenswrapper[4926]: I1122 10:44:59.654142 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-bpmk4" Nov 22 10:44:59 crc kubenswrapper[4926]: I1122 10:44:59.706326 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-bpmk4" Nov 22 10:44:59 crc kubenswrapper[4926]: I1122 10:44:59.898936 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-bpmk4" Nov 22 10:45:00 crc kubenswrapper[4926]: I1122 10:45:00.145069 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29396805-9nvz2"] Nov 22 10:45:00 crc kubenswrapper[4926]: I1122 10:45:00.145852 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29396805-9nvz2" Nov 22 10:45:00 crc kubenswrapper[4926]: I1122 10:45:00.147726 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 22 10:45:00 crc kubenswrapper[4926]: I1122 10:45:00.149560 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 22 10:45:00 crc kubenswrapper[4926]: I1122 10:45:00.150185 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29396805-9nvz2"] Nov 22 10:45:00 crc kubenswrapper[4926]: I1122 10:45:00.243124 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7lfvb\" (UniqueName: \"kubernetes.io/projected/17576076-3001-49ed-a84d-b691103b01f6-kube-api-access-7lfvb\") pod \"collect-profiles-29396805-9nvz2\" (UID: \"17576076-3001-49ed-a84d-b691103b01f6\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29396805-9nvz2" Nov 22 10:45:00 crc kubenswrapper[4926]: I1122 10:45:00.243203 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/17576076-3001-49ed-a84d-b691103b01f6-secret-volume\") pod \"collect-profiles-29396805-9nvz2\" (UID: \"17576076-3001-49ed-a84d-b691103b01f6\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29396805-9nvz2" Nov 22 10:45:00 crc kubenswrapper[4926]: I1122 10:45:00.243231 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/17576076-3001-49ed-a84d-b691103b01f6-config-volume\") pod \"collect-profiles-29396805-9nvz2\" (UID: \"17576076-3001-49ed-a84d-b691103b01f6\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29396805-9nvz2" Nov 22 10:45:00 crc kubenswrapper[4926]: I1122 10:45:00.344786 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7lfvb\" (UniqueName: \"kubernetes.io/projected/17576076-3001-49ed-a84d-b691103b01f6-kube-api-access-7lfvb\") pod \"collect-profiles-29396805-9nvz2\" (UID: \"17576076-3001-49ed-a84d-b691103b01f6\") " 
pod="openshift-operator-lifecycle-manager/collect-profiles-29396805-9nvz2" Nov 22 10:45:00 crc kubenswrapper[4926]: I1122 10:45:00.344867 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/17576076-3001-49ed-a84d-b691103b01f6-secret-volume\") pod \"collect-profiles-29396805-9nvz2\" (UID: \"17576076-3001-49ed-a84d-b691103b01f6\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29396805-9nvz2" Nov 22 10:45:00 crc kubenswrapper[4926]: I1122 10:45:00.344928 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/17576076-3001-49ed-a84d-b691103b01f6-config-volume\") pod \"collect-profiles-29396805-9nvz2\" (UID: \"17576076-3001-49ed-a84d-b691103b01f6\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29396805-9nvz2" Nov 22 10:45:00 crc kubenswrapper[4926]: I1122 10:45:00.345803 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/17576076-3001-49ed-a84d-b691103b01f6-config-volume\") pod \"collect-profiles-29396805-9nvz2\" (UID: \"17576076-3001-49ed-a84d-b691103b01f6\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29396805-9nvz2" Nov 22 10:45:00 crc kubenswrapper[4926]: I1122 10:45:00.354505 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/17576076-3001-49ed-a84d-b691103b01f6-secret-volume\") pod \"collect-profiles-29396805-9nvz2\" (UID: \"17576076-3001-49ed-a84d-b691103b01f6\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29396805-9nvz2" Nov 22 10:45:00 crc kubenswrapper[4926]: I1122 10:45:00.360800 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7lfvb\" (UniqueName: \"kubernetes.io/projected/17576076-3001-49ed-a84d-b691103b01f6-kube-api-access-7lfvb\") pod \"collect-profiles-29396805-9nvz2\" (UID: \"17576076-3001-49ed-a84d-b691103b01f6\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29396805-9nvz2" Nov 22 10:45:00 crc kubenswrapper[4926]: I1122 10:45:00.468019 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29396805-9nvz2" Nov 22 10:45:00 crc kubenswrapper[4926]: I1122 10:45:00.485134 4926 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-v4wgk" podUID="17936bf0-470e-4bc3-aa1d-28727f066d93" containerName="registry-server" probeResult="failure" output=< Nov 22 10:45:00 crc kubenswrapper[4926]: timeout: failed to connect service ":50051" within 1s Nov 22 10:45:00 crc kubenswrapper[4926]: > Nov 22 10:45:00 crc kubenswrapper[4926]: I1122 10:45:00.683470 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29396805-9nvz2"] Nov 22 10:45:00 crc kubenswrapper[4926]: W1122 10:45:00.701066 4926 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod17576076_3001_49ed_a84d_b691103b01f6.slice/crio-f1e364c748ca5bc31e9a0f9c94f5e298b539694c636ca01e67afcefcd6ec4bd5 WatchSource:0}: Error finding container f1e364c748ca5bc31e9a0f9c94f5e298b539694c636ca01e67afcefcd6ec4bd5: Status 404 returned error can't find the container with id f1e364c748ca5bc31e9a0f9c94f5e298b539694c636ca01e67afcefcd6ec4bd5 Nov 22 10:45:00 crc kubenswrapper[4926]: I1122 10:45:00.862161 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29396805-9nvz2" event={"ID":"17576076-3001-49ed-a84d-b691103b01f6","Type":"ContainerStarted","Data":"f1e364c748ca5bc31e9a0f9c94f5e298b539694c636ca01e67afcefcd6ec4bd5"} Nov 22 10:45:01 crc kubenswrapper[4926]: I1122 10:45:01.867546 4926 generic.go:334] "Generic (PLEG): container finished" podID="17576076-3001-49ed-a84d-b691103b01f6" containerID="9ea63c375f1834940d7f3e3966c602507e7e88e4a0e00624092fb00c53e59759" exitCode=0 Nov 22 10:45:01 crc kubenswrapper[4926]: I1122 10:45:01.867592 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29396805-9nvz2" event={"ID":"17576076-3001-49ed-a84d-b691103b01f6","Type":"ContainerDied","Data":"9ea63c375f1834940d7f3e3966c602507e7e88e4a0e00624092fb00c53e59759"} Nov 22 10:45:03 crc kubenswrapper[4926]: I1122 10:45:03.093924 4926 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29396805-9nvz2" Nov 22 10:45:03 crc kubenswrapper[4926]: I1122 10:45:03.179855 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/17576076-3001-49ed-a84d-b691103b01f6-secret-volume\") pod \"17576076-3001-49ed-a84d-b691103b01f6\" (UID: \"17576076-3001-49ed-a84d-b691103b01f6\") " Nov 22 10:45:03 crc kubenswrapper[4926]: I1122 10:45:03.180137 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7lfvb\" (UniqueName: \"kubernetes.io/projected/17576076-3001-49ed-a84d-b691103b01f6-kube-api-access-7lfvb\") pod \"17576076-3001-49ed-a84d-b691103b01f6\" (UID: \"17576076-3001-49ed-a84d-b691103b01f6\") " Nov 22 10:45:03 crc kubenswrapper[4926]: I1122 10:45:03.180223 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/17576076-3001-49ed-a84d-b691103b01f6-config-volume\") pod \"17576076-3001-49ed-a84d-b691103b01f6\" (UID: \"17576076-3001-49ed-a84d-b691103b01f6\") " Nov 22 10:45:03 crc kubenswrapper[4926]: I1122 10:45:03.181072 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/17576076-3001-49ed-a84d-b691103b01f6-config-volume" (OuterVolumeSpecName: "config-volume") pod "17576076-3001-49ed-a84d-b691103b01f6" (UID: "17576076-3001-49ed-a84d-b691103b01f6"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:45:03 crc kubenswrapper[4926]: I1122 10:45:03.185798 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/17576076-3001-49ed-a84d-b691103b01f6-kube-api-access-7lfvb" (OuterVolumeSpecName: "kube-api-access-7lfvb") pod "17576076-3001-49ed-a84d-b691103b01f6" (UID: "17576076-3001-49ed-a84d-b691103b01f6"). InnerVolumeSpecName "kube-api-access-7lfvb". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:45:03 crc kubenswrapper[4926]: I1122 10:45:03.185859 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/17576076-3001-49ed-a84d-b691103b01f6-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "17576076-3001-49ed-a84d-b691103b01f6" (UID: "17576076-3001-49ed-a84d-b691103b01f6"). InnerVolumeSpecName "secret-volume". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:45:03 crc kubenswrapper[4926]: I1122 10:45:03.281865 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7lfvb\" (UniqueName: \"kubernetes.io/projected/17576076-3001-49ed-a84d-b691103b01f6-kube-api-access-7lfvb\") on node \"crc\" DevicePath \"\"" Nov 22 10:45:03 crc kubenswrapper[4926]: I1122 10:45:03.281983 4926 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/17576076-3001-49ed-a84d-b691103b01f6-config-volume\") on node \"crc\" DevicePath \"\"" Nov 22 10:45:03 crc kubenswrapper[4926]: I1122 10:45:03.282010 4926 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/17576076-3001-49ed-a84d-b691103b01f6-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 22 10:45:03 crc kubenswrapper[4926]: I1122 10:45:03.881086 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29396805-9nvz2" event={"ID":"17576076-3001-49ed-a84d-b691103b01f6","Type":"ContainerDied","Data":"f1e364c748ca5bc31e9a0f9c94f5e298b539694c636ca01e67afcefcd6ec4bd5"} Nov 22 10:45:03 crc kubenswrapper[4926]: I1122 10:45:03.881431 4926 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f1e364c748ca5bc31e9a0f9c94f5e298b539694c636ca01e67afcefcd6ec4bd5" Nov 22 10:45:03 crc kubenswrapper[4926]: I1122 10:45:03.881171 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29396805-9nvz2" Nov 22 10:45:09 crc kubenswrapper[4926]: I1122 10:45:09.490290 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-v4wgk" Nov 22 10:45:09 crc kubenswrapper[4926]: I1122 10:45:09.546671 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-v4wgk" Nov 22 10:45:39 crc kubenswrapper[4926]: I1122 10:45:39.661775 4926 patch_prober.go:28] interesting pod/machine-config-daemon-xr9nd container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 22 10:45:39 crc kubenswrapper[4926]: I1122 10:45:39.663777 4926 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" podUID="d4977b14-85c3-4141-9b15-1768f09e8d27" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 22 10:46:09 crc kubenswrapper[4926]: I1122 10:46:09.661578 4926 patch_prober.go:28] interesting pod/machine-config-daemon-xr9nd container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 22 10:46:09 crc kubenswrapper[4926]: I1122 10:46:09.662135 4926 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" podUID="d4977b14-85c3-4141-9b15-1768f09e8d27" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 22 10:46:39 crc kubenswrapper[4926]: 
I1122 10:46:39.661498 4926 patch_prober.go:28] interesting pod/machine-config-daemon-xr9nd container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 22 10:46:39 crc kubenswrapper[4926]: I1122 10:46:39.662365 4926 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" podUID="d4977b14-85c3-4141-9b15-1768f09e8d27" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 22 10:46:39 crc kubenswrapper[4926]: I1122 10:46:39.662444 4926 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" Nov 22 10:46:39 crc kubenswrapper[4926]: I1122 10:46:39.663500 4926 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"a01df6d05065d9939208f762177f66a644f719d91c6951810eab9a58a33f140e"} pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 22 10:46:39 crc kubenswrapper[4926]: I1122 10:46:39.663588 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" podUID="d4977b14-85c3-4141-9b15-1768f09e8d27" containerName="machine-config-daemon" containerID="cri-o://a01df6d05065d9939208f762177f66a644f719d91c6951810eab9a58a33f140e" gracePeriod=600 Nov 22 10:46:40 crc kubenswrapper[4926]: I1122 10:46:40.504779 4926 generic.go:334] "Generic (PLEG): container finished" podID="d4977b14-85c3-4141-9b15-1768f09e8d27" containerID="a01df6d05065d9939208f762177f66a644f719d91c6951810eab9a58a33f140e" exitCode=0 Nov 22 10:46:40 crc kubenswrapper[4926]: I1122 10:46:40.504872 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" event={"ID":"d4977b14-85c3-4141-9b15-1768f09e8d27","Type":"ContainerDied","Data":"a01df6d05065d9939208f762177f66a644f719d91c6951810eab9a58a33f140e"} Nov 22 10:46:40 crc kubenswrapper[4926]: I1122 10:46:40.505120 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" event={"ID":"d4977b14-85c3-4141-9b15-1768f09e8d27","Type":"ContainerStarted","Data":"092030f4c6b7032bf4d8e4be18cfe7552dfcee8631f351b9cef2ec2df961d885"} Nov 22 10:46:40 crc kubenswrapper[4926]: I1122 10:46:40.505139 4926 scope.go:117] "RemoveContainer" containerID="fb54559825bae40317f4dbc23c5b4238c4681f9845053656d59b2d2b3e504e1a" Nov 22 10:48:05 crc kubenswrapper[4926]: I1122 10:48:05.287472 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/image-registry-66df7c8f76-px4t7"] Nov 22 10:48:05 crc kubenswrapper[4926]: E1122 10:48:05.288229 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="17576076-3001-49ed-a84d-b691103b01f6" containerName="collect-profiles" Nov 22 10:48:05 crc kubenswrapper[4926]: I1122 10:48:05.288246 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="17576076-3001-49ed-a84d-b691103b01f6" containerName="collect-profiles" Nov 22 10:48:05 crc kubenswrapper[4926]: I1122 10:48:05.288365 4926 memory_manager.go:354] "RemoveStaleState removing state" 
podUID="17576076-3001-49ed-a84d-b691103b01f6" containerName="collect-profiles" Nov 22 10:48:05 crc kubenswrapper[4926]: I1122 10:48:05.288762 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-66df7c8f76-px4t7" Nov 22 10:48:05 crc kubenswrapper[4926]: I1122 10:48:05.334864 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-66df7c8f76-px4t7"] Nov 22 10:48:05 crc kubenswrapper[4926]: I1122 10:48:05.416370 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-66df7c8f76-px4t7\" (UID: \"baebebd2-e543-4895-956a-a8d112442663\") " pod="openshift-image-registry/image-registry-66df7c8f76-px4t7" Nov 22 10:48:05 crc kubenswrapper[4926]: I1122 10:48:05.416443 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/baebebd2-e543-4895-956a-a8d112442663-bound-sa-token\") pod \"image-registry-66df7c8f76-px4t7\" (UID: \"baebebd2-e543-4895-956a-a8d112442663\") " pod="openshift-image-registry/image-registry-66df7c8f76-px4t7" Nov 22 10:48:05 crc kubenswrapper[4926]: I1122 10:48:05.416466 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/baebebd2-e543-4895-956a-a8d112442663-installation-pull-secrets\") pod \"image-registry-66df7c8f76-px4t7\" (UID: \"baebebd2-e543-4895-956a-a8d112442663\") " pod="openshift-image-registry/image-registry-66df7c8f76-px4t7" Nov 22 10:48:05 crc kubenswrapper[4926]: I1122 10:48:05.416492 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/baebebd2-e543-4895-956a-a8d112442663-registry-tls\") pod \"image-registry-66df7c8f76-px4t7\" (UID: \"baebebd2-e543-4895-956a-a8d112442663\") " pod="openshift-image-registry/image-registry-66df7c8f76-px4t7" Nov 22 10:48:05 crc kubenswrapper[4926]: I1122 10:48:05.416538 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/baebebd2-e543-4895-956a-a8d112442663-ca-trust-extracted\") pod \"image-registry-66df7c8f76-px4t7\" (UID: \"baebebd2-e543-4895-956a-a8d112442663\") " pod="openshift-image-registry/image-registry-66df7c8f76-px4t7" Nov 22 10:48:05 crc kubenswrapper[4926]: I1122 10:48:05.416562 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/baebebd2-e543-4895-956a-a8d112442663-registry-certificates\") pod \"image-registry-66df7c8f76-px4t7\" (UID: \"baebebd2-e543-4895-956a-a8d112442663\") " pod="openshift-image-registry/image-registry-66df7c8f76-px4t7" Nov 22 10:48:05 crc kubenswrapper[4926]: I1122 10:48:05.416579 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/baebebd2-e543-4895-956a-a8d112442663-trusted-ca\") pod \"image-registry-66df7c8f76-px4t7\" (UID: \"baebebd2-e543-4895-956a-a8d112442663\") " pod="openshift-image-registry/image-registry-66df7c8f76-px4t7" Nov 22 10:48:05 crc 
kubenswrapper[4926]: I1122 10:48:05.416622 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-flv9s\" (UniqueName: \"kubernetes.io/projected/baebebd2-e543-4895-956a-a8d112442663-kube-api-access-flv9s\") pod \"image-registry-66df7c8f76-px4t7\" (UID: \"baebebd2-e543-4895-956a-a8d112442663\") " pod="openshift-image-registry/image-registry-66df7c8f76-px4t7" Nov 22 10:48:05 crc kubenswrapper[4926]: I1122 10:48:05.438032 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-66df7c8f76-px4t7\" (UID: \"baebebd2-e543-4895-956a-a8d112442663\") " pod="openshift-image-registry/image-registry-66df7c8f76-px4t7" Nov 22 10:48:05 crc kubenswrapper[4926]: I1122 10:48:05.517221 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-flv9s\" (UniqueName: \"kubernetes.io/projected/baebebd2-e543-4895-956a-a8d112442663-kube-api-access-flv9s\") pod \"image-registry-66df7c8f76-px4t7\" (UID: \"baebebd2-e543-4895-956a-a8d112442663\") " pod="openshift-image-registry/image-registry-66df7c8f76-px4t7" Nov 22 10:48:05 crc kubenswrapper[4926]: I1122 10:48:05.517298 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/baebebd2-e543-4895-956a-a8d112442663-bound-sa-token\") pod \"image-registry-66df7c8f76-px4t7\" (UID: \"baebebd2-e543-4895-956a-a8d112442663\") " pod="openshift-image-registry/image-registry-66df7c8f76-px4t7" Nov 22 10:48:05 crc kubenswrapper[4926]: I1122 10:48:05.517319 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/baebebd2-e543-4895-956a-a8d112442663-installation-pull-secrets\") pod \"image-registry-66df7c8f76-px4t7\" (UID: \"baebebd2-e543-4895-956a-a8d112442663\") " pod="openshift-image-registry/image-registry-66df7c8f76-px4t7" Nov 22 10:48:05 crc kubenswrapper[4926]: I1122 10:48:05.517343 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/baebebd2-e543-4895-956a-a8d112442663-registry-tls\") pod \"image-registry-66df7c8f76-px4t7\" (UID: \"baebebd2-e543-4895-956a-a8d112442663\") " pod="openshift-image-registry/image-registry-66df7c8f76-px4t7" Nov 22 10:48:05 crc kubenswrapper[4926]: I1122 10:48:05.517361 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/baebebd2-e543-4895-956a-a8d112442663-ca-trust-extracted\") pod \"image-registry-66df7c8f76-px4t7\" (UID: \"baebebd2-e543-4895-956a-a8d112442663\") " pod="openshift-image-registry/image-registry-66df7c8f76-px4t7" Nov 22 10:48:05 crc kubenswrapper[4926]: I1122 10:48:05.517378 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/baebebd2-e543-4895-956a-a8d112442663-registry-certificates\") pod \"image-registry-66df7c8f76-px4t7\" (UID: \"baebebd2-e543-4895-956a-a8d112442663\") " pod="openshift-image-registry/image-registry-66df7c8f76-px4t7" Nov 22 10:48:05 crc kubenswrapper[4926]: I1122 10:48:05.517393 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: 
\"kubernetes.io/configmap/baebebd2-e543-4895-956a-a8d112442663-trusted-ca\") pod \"image-registry-66df7c8f76-px4t7\" (UID: \"baebebd2-e543-4895-956a-a8d112442663\") " pod="openshift-image-registry/image-registry-66df7c8f76-px4t7" Nov 22 10:48:05 crc kubenswrapper[4926]: I1122 10:48:05.518077 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/baebebd2-e543-4895-956a-a8d112442663-ca-trust-extracted\") pod \"image-registry-66df7c8f76-px4t7\" (UID: \"baebebd2-e543-4895-956a-a8d112442663\") " pod="openshift-image-registry/image-registry-66df7c8f76-px4t7" Nov 22 10:48:05 crc kubenswrapper[4926]: I1122 10:48:05.518590 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/baebebd2-e543-4895-956a-a8d112442663-trusted-ca\") pod \"image-registry-66df7c8f76-px4t7\" (UID: \"baebebd2-e543-4895-956a-a8d112442663\") " pod="openshift-image-registry/image-registry-66df7c8f76-px4t7" Nov 22 10:48:05 crc kubenswrapper[4926]: I1122 10:48:05.518600 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/baebebd2-e543-4895-956a-a8d112442663-registry-certificates\") pod \"image-registry-66df7c8f76-px4t7\" (UID: \"baebebd2-e543-4895-956a-a8d112442663\") " pod="openshift-image-registry/image-registry-66df7c8f76-px4t7" Nov 22 10:48:05 crc kubenswrapper[4926]: I1122 10:48:05.523635 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/baebebd2-e543-4895-956a-a8d112442663-registry-tls\") pod \"image-registry-66df7c8f76-px4t7\" (UID: \"baebebd2-e543-4895-956a-a8d112442663\") " pod="openshift-image-registry/image-registry-66df7c8f76-px4t7" Nov 22 10:48:05 crc kubenswrapper[4926]: I1122 10:48:05.524013 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/baebebd2-e543-4895-956a-a8d112442663-installation-pull-secrets\") pod \"image-registry-66df7c8f76-px4t7\" (UID: \"baebebd2-e543-4895-956a-a8d112442663\") " pod="openshift-image-registry/image-registry-66df7c8f76-px4t7" Nov 22 10:48:05 crc kubenswrapper[4926]: I1122 10:48:05.531854 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-flv9s\" (UniqueName: \"kubernetes.io/projected/baebebd2-e543-4895-956a-a8d112442663-kube-api-access-flv9s\") pod \"image-registry-66df7c8f76-px4t7\" (UID: \"baebebd2-e543-4895-956a-a8d112442663\") " pod="openshift-image-registry/image-registry-66df7c8f76-px4t7" Nov 22 10:48:05 crc kubenswrapper[4926]: I1122 10:48:05.532059 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/baebebd2-e543-4895-956a-a8d112442663-bound-sa-token\") pod \"image-registry-66df7c8f76-px4t7\" (UID: \"baebebd2-e543-4895-956a-a8d112442663\") " pod="openshift-image-registry/image-registry-66df7c8f76-px4t7" Nov 22 10:48:05 crc kubenswrapper[4926]: I1122 10:48:05.602769 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-registry-66df7c8f76-px4t7" Nov 22 10:48:05 crc kubenswrapper[4926]: I1122 10:48:05.838408 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-66df7c8f76-px4t7"] Nov 22 10:48:06 crc kubenswrapper[4926]: I1122 10:48:06.035133 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-66df7c8f76-px4t7" event={"ID":"baebebd2-e543-4895-956a-a8d112442663","Type":"ContainerStarted","Data":"21ce1d364ff9d1927f586d5ba6803b92463bb3dcc4269464782ea16e2dbd2ed3"} Nov 22 10:48:06 crc kubenswrapper[4926]: I1122 10:48:06.035173 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-66df7c8f76-px4t7" event={"ID":"baebebd2-e543-4895-956a-a8d112442663","Type":"ContainerStarted","Data":"2e6727cc861485546beebb8e5bd717f2bcd199c9adeab6fe814a896958d0c046"} Nov 22 10:48:06 crc kubenswrapper[4926]: I1122 10:48:06.035969 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-image-registry/image-registry-66df7c8f76-px4t7" Nov 22 10:48:06 crc kubenswrapper[4926]: I1122 10:48:06.058083 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/image-registry-66df7c8f76-px4t7" podStartSLOduration=1.058062701 podStartE2EDuration="1.058062701s" podCreationTimestamp="2025-11-22 10:48:05 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 10:48:06.055637912 +0000 UTC m=+506.357243279" watchObservedRunningTime="2025-11-22 10:48:06.058062701 +0000 UTC m=+506.359667988" Nov 22 10:48:25 crc kubenswrapper[4926]: I1122 10:48:25.607676 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-image-registry/image-registry-66df7c8f76-px4t7" Nov 22 10:48:25 crc kubenswrapper[4926]: I1122 10:48:25.687927 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-wlnj9"] Nov 22 10:48:39 crc kubenswrapper[4926]: I1122 10:48:39.661241 4926 patch_prober.go:28] interesting pod/machine-config-daemon-xr9nd container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 22 10:48:39 crc kubenswrapper[4926]: I1122 10:48:39.661973 4926 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" podUID="d4977b14-85c3-4141-9b15-1768f09e8d27" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 22 10:48:50 crc kubenswrapper[4926]: I1122 10:48:50.769656 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-image-registry/image-registry-697d97f7c8-wlnj9" podUID="d7487242-27ca-4f15-8d7f-6a7cf67e8992" containerName="registry" containerID="cri-o://cf25e80f29d26f1063b60d0aedcdead40278024d15acda1a5cec8783ab409226" gracePeriod=30 Nov 22 10:48:51 crc kubenswrapper[4926]: I1122 10:48:51.093058 4926 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-wlnj9" Nov 22 10:48:51 crc kubenswrapper[4926]: I1122 10:48:51.256983 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-storage\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"d7487242-27ca-4f15-8d7f-6a7cf67e8992\" (UID: \"d7487242-27ca-4f15-8d7f-6a7cf67e8992\") " Nov 22 10:48:51 crc kubenswrapper[4926]: I1122 10:48:51.257078 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/d7487242-27ca-4f15-8d7f-6a7cf67e8992-trusted-ca\") pod \"d7487242-27ca-4f15-8d7f-6a7cf67e8992\" (UID: \"d7487242-27ca-4f15-8d7f-6a7cf67e8992\") " Nov 22 10:48:51 crc kubenswrapper[4926]: I1122 10:48:51.257140 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nm522\" (UniqueName: \"kubernetes.io/projected/d7487242-27ca-4f15-8d7f-6a7cf67e8992-kube-api-access-nm522\") pod \"d7487242-27ca-4f15-8d7f-6a7cf67e8992\" (UID: \"d7487242-27ca-4f15-8d7f-6a7cf67e8992\") " Nov 22 10:48:51 crc kubenswrapper[4926]: I1122 10:48:51.257172 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/d7487242-27ca-4f15-8d7f-6a7cf67e8992-ca-trust-extracted\") pod \"d7487242-27ca-4f15-8d7f-6a7cf67e8992\" (UID: \"d7487242-27ca-4f15-8d7f-6a7cf67e8992\") " Nov 22 10:48:51 crc kubenswrapper[4926]: I1122 10:48:51.257220 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/d7487242-27ca-4f15-8d7f-6a7cf67e8992-registry-tls\") pod \"d7487242-27ca-4f15-8d7f-6a7cf67e8992\" (UID: \"d7487242-27ca-4f15-8d7f-6a7cf67e8992\") " Nov 22 10:48:51 crc kubenswrapper[4926]: I1122 10:48:51.257248 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/d7487242-27ca-4f15-8d7f-6a7cf67e8992-registry-certificates\") pod \"d7487242-27ca-4f15-8d7f-6a7cf67e8992\" (UID: \"d7487242-27ca-4f15-8d7f-6a7cf67e8992\") " Nov 22 10:48:51 crc kubenswrapper[4926]: I1122 10:48:51.257288 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/d7487242-27ca-4f15-8d7f-6a7cf67e8992-bound-sa-token\") pod \"d7487242-27ca-4f15-8d7f-6a7cf67e8992\" (UID: \"d7487242-27ca-4f15-8d7f-6a7cf67e8992\") " Nov 22 10:48:51 crc kubenswrapper[4926]: I1122 10:48:51.257366 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/d7487242-27ca-4f15-8d7f-6a7cf67e8992-installation-pull-secrets\") pod \"d7487242-27ca-4f15-8d7f-6a7cf67e8992\" (UID: \"d7487242-27ca-4f15-8d7f-6a7cf67e8992\") " Nov 22 10:48:51 crc kubenswrapper[4926]: I1122 10:48:51.258255 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d7487242-27ca-4f15-8d7f-6a7cf67e8992-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "d7487242-27ca-4f15-8d7f-6a7cf67e8992" (UID: "d7487242-27ca-4f15-8d7f-6a7cf67e8992"). InnerVolumeSpecName "trusted-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:48:51 crc kubenswrapper[4926]: I1122 10:48:51.260015 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d7487242-27ca-4f15-8d7f-6a7cf67e8992-registry-certificates" (OuterVolumeSpecName: "registry-certificates") pod "d7487242-27ca-4f15-8d7f-6a7cf67e8992" (UID: "d7487242-27ca-4f15-8d7f-6a7cf67e8992"). InnerVolumeSpecName "registry-certificates". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:48:51 crc kubenswrapper[4926]: I1122 10:48:51.269508 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d7487242-27ca-4f15-8d7f-6a7cf67e8992-installation-pull-secrets" (OuterVolumeSpecName: "installation-pull-secrets") pod "d7487242-27ca-4f15-8d7f-6a7cf67e8992" (UID: "d7487242-27ca-4f15-8d7f-6a7cf67e8992"). InnerVolumeSpecName "installation-pull-secrets". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:48:51 crc kubenswrapper[4926]: I1122 10:48:51.269643 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d7487242-27ca-4f15-8d7f-6a7cf67e8992-registry-tls" (OuterVolumeSpecName: "registry-tls") pod "d7487242-27ca-4f15-8d7f-6a7cf67e8992" (UID: "d7487242-27ca-4f15-8d7f-6a7cf67e8992"). InnerVolumeSpecName "registry-tls". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:48:51 crc kubenswrapper[4926]: I1122 10:48:51.269829 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d7487242-27ca-4f15-8d7f-6a7cf67e8992-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "d7487242-27ca-4f15-8d7f-6a7cf67e8992" (UID: "d7487242-27ca-4f15-8d7f-6a7cf67e8992"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:48:51 crc kubenswrapper[4926]: I1122 10:48:51.270419 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d7487242-27ca-4f15-8d7f-6a7cf67e8992-kube-api-access-nm522" (OuterVolumeSpecName: "kube-api-access-nm522") pod "d7487242-27ca-4f15-8d7f-6a7cf67e8992" (UID: "d7487242-27ca-4f15-8d7f-6a7cf67e8992"). InnerVolumeSpecName "kube-api-access-nm522". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:48:51 crc kubenswrapper[4926]: I1122 10:48:51.270722 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (OuterVolumeSpecName: "registry-storage") pod "d7487242-27ca-4f15-8d7f-6a7cf67e8992" (UID: "d7487242-27ca-4f15-8d7f-6a7cf67e8992"). InnerVolumeSpecName "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8". PluginName "kubernetes.io/csi", VolumeGidValue "" Nov 22 10:48:51 crc kubenswrapper[4926]: I1122 10:48:51.276902 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d7487242-27ca-4f15-8d7f-6a7cf67e8992-ca-trust-extracted" (OuterVolumeSpecName: "ca-trust-extracted") pod "d7487242-27ca-4f15-8d7f-6a7cf67e8992" (UID: "d7487242-27ca-4f15-8d7f-6a7cf67e8992"). InnerVolumeSpecName "ca-trust-extracted". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 10:48:51 crc kubenswrapper[4926]: I1122 10:48:51.335077 4926 generic.go:334] "Generic (PLEG): container finished" podID="d7487242-27ca-4f15-8d7f-6a7cf67e8992" containerID="cf25e80f29d26f1063b60d0aedcdead40278024d15acda1a5cec8783ab409226" exitCode=0 Nov 22 10:48:51 crc kubenswrapper[4926]: I1122 10:48:51.335137 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-wlnj9" Nov 22 10:48:51 crc kubenswrapper[4926]: I1122 10:48:51.335143 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-wlnj9" event={"ID":"d7487242-27ca-4f15-8d7f-6a7cf67e8992","Type":"ContainerDied","Data":"cf25e80f29d26f1063b60d0aedcdead40278024d15acda1a5cec8783ab409226"} Nov 22 10:48:51 crc kubenswrapper[4926]: I1122 10:48:51.335264 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-wlnj9" event={"ID":"d7487242-27ca-4f15-8d7f-6a7cf67e8992","Type":"ContainerDied","Data":"99b73edd8a23876dffd0852a6ef6fafaa9b7d377a55d80271beab1e22092f62a"} Nov 22 10:48:51 crc kubenswrapper[4926]: I1122 10:48:51.335492 4926 scope.go:117] "RemoveContainer" containerID="cf25e80f29d26f1063b60d0aedcdead40278024d15acda1a5cec8783ab409226" Nov 22 10:48:51 crc kubenswrapper[4926]: I1122 10:48:51.358698 4926 scope.go:117] "RemoveContainer" containerID="cf25e80f29d26f1063b60d0aedcdead40278024d15acda1a5cec8783ab409226" Nov 22 10:48:51 crc kubenswrapper[4926]: I1122 10:48:51.361771 4926 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/d7487242-27ca-4f15-8d7f-6a7cf67e8992-bound-sa-token\") on node \"crc\" DevicePath \"\"" Nov 22 10:48:51 crc kubenswrapper[4926]: I1122 10:48:51.361827 4926 reconciler_common.go:293] "Volume detached for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/d7487242-27ca-4f15-8d7f-6a7cf67e8992-installation-pull-secrets\") on node \"crc\" DevicePath \"\"" Nov 22 10:48:51 crc kubenswrapper[4926]: I1122 10:48:51.361851 4926 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/d7487242-27ca-4f15-8d7f-6a7cf67e8992-trusted-ca\") on node \"crc\" DevicePath \"\"" Nov 22 10:48:51 crc kubenswrapper[4926]: I1122 10:48:51.361867 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nm522\" (UniqueName: \"kubernetes.io/projected/d7487242-27ca-4f15-8d7f-6a7cf67e8992-kube-api-access-nm522\") on node \"crc\" DevicePath \"\"" Nov 22 10:48:51 crc kubenswrapper[4926]: I1122 10:48:51.361883 4926 reconciler_common.go:293] "Volume detached for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/d7487242-27ca-4f15-8d7f-6a7cf67e8992-ca-trust-extracted\") on node \"crc\" DevicePath \"\"" Nov 22 10:48:51 crc kubenswrapper[4926]: I1122 10:48:51.361989 4926 reconciler_common.go:293] "Volume detached for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/d7487242-27ca-4f15-8d7f-6a7cf67e8992-registry-tls\") on node \"crc\" DevicePath \"\"" Nov 22 10:48:51 crc kubenswrapper[4926]: I1122 10:48:51.362008 4926 reconciler_common.go:293] "Volume detached for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/d7487242-27ca-4f15-8d7f-6a7cf67e8992-registry-certificates\") on node \"crc\" DevicePath \"\"" Nov 22 10:48:51 crc kubenswrapper[4926]: E1122 10:48:51.365877 4926 log.go:32] "ContainerStatus 
from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"cf25e80f29d26f1063b60d0aedcdead40278024d15acda1a5cec8783ab409226\": container with ID starting with cf25e80f29d26f1063b60d0aedcdead40278024d15acda1a5cec8783ab409226 not found: ID does not exist" containerID="cf25e80f29d26f1063b60d0aedcdead40278024d15acda1a5cec8783ab409226" Nov 22 10:48:51 crc kubenswrapper[4926]: I1122 10:48:51.366001 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cf25e80f29d26f1063b60d0aedcdead40278024d15acda1a5cec8783ab409226"} err="failed to get container status \"cf25e80f29d26f1063b60d0aedcdead40278024d15acda1a5cec8783ab409226\": rpc error: code = NotFound desc = could not find container \"cf25e80f29d26f1063b60d0aedcdead40278024d15acda1a5cec8783ab409226\": container with ID starting with cf25e80f29d26f1063b60d0aedcdead40278024d15acda1a5cec8783ab409226 not found: ID does not exist" Nov 22 10:48:51 crc kubenswrapper[4926]: I1122 10:48:51.367458 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-wlnj9"] Nov 22 10:48:51 crc kubenswrapper[4926]: I1122 10:48:51.371355 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-wlnj9"] Nov 22 10:48:52 crc kubenswrapper[4926]: I1122 10:48:52.590657 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d7487242-27ca-4f15-8d7f-6a7cf67e8992" path="/var/lib/kubelet/pods/d7487242-27ca-4f15-8d7f-6a7cf67e8992/volumes" Nov 22 10:49:09 crc kubenswrapper[4926]: I1122 10:49:09.661732 4926 patch_prober.go:28] interesting pod/machine-config-daemon-xr9nd container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 22 10:49:09 crc kubenswrapper[4926]: I1122 10:49:09.662724 4926 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" podUID="d4977b14-85c3-4141-9b15-1768f09e8d27" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 22 10:49:39 crc kubenswrapper[4926]: I1122 10:49:39.660741 4926 patch_prober.go:28] interesting pod/machine-config-daemon-xr9nd container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 22 10:49:39 crc kubenswrapper[4926]: I1122 10:49:39.661470 4926 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" podUID="d4977b14-85c3-4141-9b15-1768f09e8d27" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 22 10:49:39 crc kubenswrapper[4926]: I1122 10:49:39.661538 4926 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" Nov 22 10:49:39 crc kubenswrapper[4926]: I1122 10:49:39.662478 4926 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"092030f4c6b7032bf4d8e4be18cfe7552dfcee8631f351b9cef2ec2df961d885"} 
pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 22 10:49:39 crc kubenswrapper[4926]: I1122 10:49:39.662600 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" podUID="d4977b14-85c3-4141-9b15-1768f09e8d27" containerName="machine-config-daemon" containerID="cri-o://092030f4c6b7032bf4d8e4be18cfe7552dfcee8631f351b9cef2ec2df961d885" gracePeriod=600 Nov 22 10:49:40 crc kubenswrapper[4926]: I1122 10:49:40.693849 4926 generic.go:334] "Generic (PLEG): container finished" podID="d4977b14-85c3-4141-9b15-1768f09e8d27" containerID="092030f4c6b7032bf4d8e4be18cfe7552dfcee8631f351b9cef2ec2df961d885" exitCode=0 Nov 22 10:49:40 crc kubenswrapper[4926]: I1122 10:49:40.693928 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" event={"ID":"d4977b14-85c3-4141-9b15-1768f09e8d27","Type":"ContainerDied","Data":"092030f4c6b7032bf4d8e4be18cfe7552dfcee8631f351b9cef2ec2df961d885"} Nov 22 10:49:40 crc kubenswrapper[4926]: I1122 10:49:40.695016 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" event={"ID":"d4977b14-85c3-4141-9b15-1768f09e8d27","Type":"ContainerStarted","Data":"0463c6c1ebc5539c197e7353369d97a26101d13494f1fee9c7e8ab944e7952f8"} Nov 22 10:49:40 crc kubenswrapper[4926]: I1122 10:49:40.695099 4926 scope.go:117] "RemoveContainer" containerID="a01df6d05065d9939208f762177f66a644f719d91c6951810eab9a58a33f140e" Nov 22 10:50:01 crc kubenswrapper[4926]: I1122 10:50:01.381054 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["cert-manager/cert-manager-cainjector-7f985d654d-zzrcx"] Nov 22 10:50:01 crc kubenswrapper[4926]: E1122 10:50:01.381808 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d7487242-27ca-4f15-8d7f-6a7cf67e8992" containerName="registry" Nov 22 10:50:01 crc kubenswrapper[4926]: I1122 10:50:01.381823 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="d7487242-27ca-4f15-8d7f-6a7cf67e8992" containerName="registry" Nov 22 10:50:01 crc kubenswrapper[4926]: I1122 10:50:01.381953 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="d7487242-27ca-4f15-8d7f-6a7cf67e8992" containerName="registry" Nov 22 10:50:01 crc kubenswrapper[4926]: I1122 10:50:01.382412 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-cainjector-7f985d654d-zzrcx" Nov 22 10:50:01 crc kubenswrapper[4926]: I1122 10:50:01.387385 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"cert-manager"/"openshift-service-ca.crt" Nov 22 10:50:01 crc kubenswrapper[4926]: I1122 10:50:01.387940 4926 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-cainjector-dockercfg-f4xh9" Nov 22 10:50:01 crc kubenswrapper[4926]: I1122 10:50:01.388233 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"cert-manager"/"kube-root-ca.crt" Nov 22 10:50:01 crc kubenswrapper[4926]: I1122 10:50:01.393154 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["cert-manager/cert-manager-5b446d88c5-xpqf5"] Nov 22 10:50:01 crc kubenswrapper[4926]: I1122 10:50:01.394056 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="cert-manager/cert-manager-5b446d88c5-xpqf5" Nov 22 10:50:01 crc kubenswrapper[4926]: I1122 10:50:01.395542 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-cainjector-7f985d654d-zzrcx"] Nov 22 10:50:01 crc kubenswrapper[4926]: I1122 10:50:01.397858 4926 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-dockercfg-fc9vq" Nov 22 10:50:01 crc kubenswrapper[4926]: I1122 10:50:01.411924 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-5b446d88c5-xpqf5"] Nov 22 10:50:01 crc kubenswrapper[4926]: I1122 10:50:01.421275 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["cert-manager/cert-manager-webhook-5655c58dd6-w4h2r"] Nov 22 10:50:01 crc kubenswrapper[4926]: I1122 10:50:01.422080 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-webhook-5655c58dd6-w4h2r" Nov 22 10:50:01 crc kubenswrapper[4926]: I1122 10:50:01.423681 4926 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-webhook-dockercfg-mq8qv" Nov 22 10:50:01 crc kubenswrapper[4926]: I1122 10:50:01.429656 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-webhook-5655c58dd6-w4h2r"] Nov 22 10:50:01 crc kubenswrapper[4926]: I1122 10:50:01.471082 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cn74p\" (UniqueName: \"kubernetes.io/projected/ffec6625-ab3c-4e67-af68-afdbe4210730-kube-api-access-cn74p\") pod \"cert-manager-webhook-5655c58dd6-w4h2r\" (UID: \"ffec6625-ab3c-4e67-af68-afdbe4210730\") " pod="cert-manager/cert-manager-webhook-5655c58dd6-w4h2r" Nov 22 10:50:01 crc kubenswrapper[4926]: I1122 10:50:01.471130 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pdrc4\" (UniqueName: \"kubernetes.io/projected/46c8cced-eb2e-409c-9923-f28c5924e5b1-kube-api-access-pdrc4\") pod \"cert-manager-cainjector-7f985d654d-zzrcx\" (UID: \"46c8cced-eb2e-409c-9923-f28c5924e5b1\") " pod="cert-manager/cert-manager-cainjector-7f985d654d-zzrcx" Nov 22 10:50:01 crc kubenswrapper[4926]: I1122 10:50:01.471164 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wcccn\" (UniqueName: \"kubernetes.io/projected/473a1f27-e3c6-4c74-9daf-da6ae42cc754-kube-api-access-wcccn\") pod \"cert-manager-5b446d88c5-xpqf5\" (UID: \"473a1f27-e3c6-4c74-9daf-da6ae42cc754\") " pod="cert-manager/cert-manager-5b446d88c5-xpqf5" Nov 22 10:50:01 crc kubenswrapper[4926]: I1122 10:50:01.573050 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pdrc4\" (UniqueName: \"kubernetes.io/projected/46c8cced-eb2e-409c-9923-f28c5924e5b1-kube-api-access-pdrc4\") pod \"cert-manager-cainjector-7f985d654d-zzrcx\" (UID: \"46c8cced-eb2e-409c-9923-f28c5924e5b1\") " pod="cert-manager/cert-manager-cainjector-7f985d654d-zzrcx" Nov 22 10:50:01 crc kubenswrapper[4926]: I1122 10:50:01.573169 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wcccn\" (UniqueName: \"kubernetes.io/projected/473a1f27-e3c6-4c74-9daf-da6ae42cc754-kube-api-access-wcccn\") pod \"cert-manager-5b446d88c5-xpqf5\" (UID: \"473a1f27-e3c6-4c74-9daf-da6ae42cc754\") " pod="cert-manager/cert-manager-5b446d88c5-xpqf5" Nov 22 10:50:01 crc kubenswrapper[4926]: I1122 10:50:01.573272 4926 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cn74p\" (UniqueName: \"kubernetes.io/projected/ffec6625-ab3c-4e67-af68-afdbe4210730-kube-api-access-cn74p\") pod \"cert-manager-webhook-5655c58dd6-w4h2r\" (UID: \"ffec6625-ab3c-4e67-af68-afdbe4210730\") " pod="cert-manager/cert-manager-webhook-5655c58dd6-w4h2r" Nov 22 10:50:01 crc kubenswrapper[4926]: I1122 10:50:01.593061 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wcccn\" (UniqueName: \"kubernetes.io/projected/473a1f27-e3c6-4c74-9daf-da6ae42cc754-kube-api-access-wcccn\") pod \"cert-manager-5b446d88c5-xpqf5\" (UID: \"473a1f27-e3c6-4c74-9daf-da6ae42cc754\") " pod="cert-manager/cert-manager-5b446d88c5-xpqf5" Nov 22 10:50:01 crc kubenswrapper[4926]: I1122 10:50:01.593177 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cn74p\" (UniqueName: \"kubernetes.io/projected/ffec6625-ab3c-4e67-af68-afdbe4210730-kube-api-access-cn74p\") pod \"cert-manager-webhook-5655c58dd6-w4h2r\" (UID: \"ffec6625-ab3c-4e67-af68-afdbe4210730\") " pod="cert-manager/cert-manager-webhook-5655c58dd6-w4h2r" Nov 22 10:50:01 crc kubenswrapper[4926]: I1122 10:50:01.593596 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pdrc4\" (UniqueName: \"kubernetes.io/projected/46c8cced-eb2e-409c-9923-f28c5924e5b1-kube-api-access-pdrc4\") pod \"cert-manager-cainjector-7f985d654d-zzrcx\" (UID: \"46c8cced-eb2e-409c-9923-f28c5924e5b1\") " pod="cert-manager/cert-manager-cainjector-7f985d654d-zzrcx" Nov 22 10:50:01 crc kubenswrapper[4926]: I1122 10:50:01.699593 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-cainjector-7f985d654d-zzrcx" Nov 22 10:50:01 crc kubenswrapper[4926]: I1122 10:50:01.713273 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-5b446d88c5-xpqf5" Nov 22 10:50:01 crc kubenswrapper[4926]: I1122 10:50:01.744045 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="cert-manager/cert-manager-webhook-5655c58dd6-w4h2r" Nov 22 10:50:01 crc kubenswrapper[4926]: I1122 10:50:01.950820 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-cainjector-7f985d654d-zzrcx"] Nov 22 10:50:01 crc kubenswrapper[4926]: I1122 10:50:01.962069 4926 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 22 10:50:01 crc kubenswrapper[4926]: I1122 10:50:01.980144 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-5b446d88c5-xpqf5"] Nov 22 10:50:02 crc kubenswrapper[4926]: I1122 10:50:02.010566 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-webhook-5655c58dd6-w4h2r"] Nov 22 10:50:02 crc kubenswrapper[4926]: W1122 10:50:02.017292 4926 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podffec6625_ab3c_4e67_af68_afdbe4210730.slice/crio-c91fc57fd2e67c82d800c1e78c05cbc9f6eda04da662349ec5f63ec9b66b2d5b WatchSource:0}: Error finding container c91fc57fd2e67c82d800c1e78c05cbc9f6eda04da662349ec5f63ec9b66b2d5b: Status 404 returned error can't find the container with id c91fc57fd2e67c82d800c1e78c05cbc9f6eda04da662349ec5f63ec9b66b2d5b Nov 22 10:50:02 crc kubenswrapper[4926]: I1122 10:50:02.837379 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-5b446d88c5-xpqf5" event={"ID":"473a1f27-e3c6-4c74-9daf-da6ae42cc754","Type":"ContainerStarted","Data":"9b0f4a57d3e6a45a93cbd5791b409862d229685478726ab3be03130aa5d2e7a6"} Nov 22 10:50:02 crc kubenswrapper[4926]: I1122 10:50:02.839190 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-cainjector-7f985d654d-zzrcx" event={"ID":"46c8cced-eb2e-409c-9923-f28c5924e5b1","Type":"ContainerStarted","Data":"5d3487b11ffd22ef88c3ce940a7a4fa35d6cce62b7485f6dc338a22bdcb23ffb"} Nov 22 10:50:02 crc kubenswrapper[4926]: I1122 10:50:02.841031 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-webhook-5655c58dd6-w4h2r" event={"ID":"ffec6625-ab3c-4e67-af68-afdbe4210730","Type":"ContainerStarted","Data":"c91fc57fd2e67c82d800c1e78c05cbc9f6eda04da662349ec5f63ec9b66b2d5b"} Nov 22 10:50:05 crc kubenswrapper[4926]: I1122 10:50:05.855637 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-cainjector-7f985d654d-zzrcx" event={"ID":"46c8cced-eb2e-409c-9923-f28c5924e5b1","Type":"ContainerStarted","Data":"9e066f60fda7d33a9f351a7e13029c8b3ace6690abb3bed0df2d0900011cf268"} Nov 22 10:50:05 crc kubenswrapper[4926]: I1122 10:50:05.857023 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-webhook-5655c58dd6-w4h2r" event={"ID":"ffec6625-ab3c-4e67-af68-afdbe4210730","Type":"ContainerStarted","Data":"418f3b0d0b732c8af773d7a7ae7918ff20c2975d347bdbdd2afd04b6acc6ecd0"} Nov 22 10:50:05 crc kubenswrapper[4926]: I1122 10:50:05.857696 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="cert-manager/cert-manager-webhook-5655c58dd6-w4h2r" Nov 22 10:50:05 crc kubenswrapper[4926]: I1122 10:50:05.859028 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-5b446d88c5-xpqf5" event={"ID":"473a1f27-e3c6-4c74-9daf-da6ae42cc754","Type":"ContainerStarted","Data":"cf6a8aaa0a71bfaa580787c40e04e6d3549c728eab2ecc6bf2a82f3811f01ea0"} Nov 22 10:50:05 crc kubenswrapper[4926]: I1122 10:50:05.870847 4926 pod_startup_latency_tracker.go:104] 
"Observed pod startup duration" pod="cert-manager/cert-manager-cainjector-7f985d654d-zzrcx" podStartSLOduration=1.445964795 podStartE2EDuration="4.87082813s" podCreationTimestamp="2025-11-22 10:50:01 +0000 UTC" firstStartedPulling="2025-11-22 10:50:01.961833529 +0000 UTC m=+622.263438806" lastFinishedPulling="2025-11-22 10:50:05.386696854 +0000 UTC m=+625.688302141" observedRunningTime="2025-11-22 10:50:05.868943556 +0000 UTC m=+626.170548843" watchObservedRunningTime="2025-11-22 10:50:05.87082813 +0000 UTC m=+626.172433417" Nov 22 10:50:05 crc kubenswrapper[4926]: I1122 10:50:05.891045 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager/cert-manager-5b446d88c5-xpqf5" podStartSLOduration=1.560693921 podStartE2EDuration="4.891025337s" podCreationTimestamp="2025-11-22 10:50:01 +0000 UTC" firstStartedPulling="2025-11-22 10:50:01.994153462 +0000 UTC m=+622.295758749" lastFinishedPulling="2025-11-22 10:50:05.324484878 +0000 UTC m=+625.626090165" observedRunningTime="2025-11-22 10:50:05.887190867 +0000 UTC m=+626.188796164" watchObservedRunningTime="2025-11-22 10:50:05.891025337 +0000 UTC m=+626.192630624" Nov 22 10:50:05 crc kubenswrapper[4926]: I1122 10:50:05.906455 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager/cert-manager-webhook-5655c58dd6-w4h2r" podStartSLOduration=1.6004741569999998 podStartE2EDuration="4.906437047s" podCreationTimestamp="2025-11-22 10:50:01 +0000 UTC" firstStartedPulling="2025-11-22 10:50:02.018539398 +0000 UTC m=+622.320144685" lastFinishedPulling="2025-11-22 10:50:05.324502288 +0000 UTC m=+625.626107575" observedRunningTime="2025-11-22 10:50:05.902806253 +0000 UTC m=+626.204411540" watchObservedRunningTime="2025-11-22 10:50:05.906437047 +0000 UTC m=+626.208042334" Nov 22 10:50:11 crc kubenswrapper[4926]: I1122 10:50:11.749116 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="cert-manager/cert-manager-webhook-5655c58dd6-w4h2r" Nov 22 10:50:12 crc kubenswrapper[4926]: I1122 10:50:12.113513 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-z69nr"] Nov 22 10:50:12 crc kubenswrapper[4926]: I1122 10:50:12.114252 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-z69nr" podUID="25bc94bb-a5d1-431c-9847-2f6a02997e25" containerName="ovn-controller" containerID="cri-o://7f647a7091a4d0acbf116e2583851b13ec4415004447202e892f2b24b7acf5e9" gracePeriod=30 Nov 22 10:50:12 crc kubenswrapper[4926]: I1122 10:50:12.114291 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-z69nr" podUID="25bc94bb-a5d1-431c-9847-2f6a02997e25" containerName="nbdb" containerID="cri-o://ecaafcef7f92f85be04341712725da108f6dc00c79d4fb72f683186ccaa7c7d7" gracePeriod=30 Nov 22 10:50:12 crc kubenswrapper[4926]: I1122 10:50:12.114366 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-z69nr" podUID="25bc94bb-a5d1-431c-9847-2f6a02997e25" containerName="kube-rbac-proxy-ovn-metrics" containerID="cri-o://877601ea3a274df873ed3165ae9f9e407093527a55ca10bb9a1c6092312c1b58" gracePeriod=30 Nov 22 10:50:12 crc kubenswrapper[4926]: I1122 10:50:12.114453 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-z69nr" podUID="25bc94bb-a5d1-431c-9847-2f6a02997e25" containerName="northd" 
containerID="cri-o://f2e1c170bdf63c83df5049fbe46ef4de6e08838e961ed3c16904cb9572fdf52f" gracePeriod=30 Nov 22 10:50:12 crc kubenswrapper[4926]: I1122 10:50:12.114450 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-z69nr" podUID="25bc94bb-a5d1-431c-9847-2f6a02997e25" containerName="kube-rbac-proxy-node" containerID="cri-o://20393151c13ddc03e23c32092c70e09c99527d0dd7554c02fc9d2ef8f6e38ba4" gracePeriod=30 Nov 22 10:50:12 crc kubenswrapper[4926]: I1122 10:50:12.114622 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-z69nr" podUID="25bc94bb-a5d1-431c-9847-2f6a02997e25" containerName="sbdb" containerID="cri-o://19bceeb84e2cce918b75507fb0f9d5e4365fcd0d30da4491b83ed54b3c2d369a" gracePeriod=30 Nov 22 10:50:12 crc kubenswrapper[4926]: I1122 10:50:12.114655 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-z69nr" podUID="25bc94bb-a5d1-431c-9847-2f6a02997e25" containerName="ovn-acl-logging" containerID="cri-o://4dd9889bb8600e958757f70ba92e2e7742f9f4e8ce9bf9c11dc4a2fd4ba0aaa2" gracePeriod=30 Nov 22 10:50:12 crc kubenswrapper[4926]: I1122 10:50:12.174378 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-z69nr" podUID="25bc94bb-a5d1-431c-9847-2f6a02997e25" containerName="ovnkube-controller" containerID="cri-o://4271454bde9cb7e8d020b6afb988e02927d482eb62ccfdad1182a4e8e3c46761" gracePeriod=30 Nov 22 10:50:12 crc kubenswrapper[4926]: I1122 10:50:12.459722 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-z69nr_25bc94bb-a5d1-431c-9847-2f6a02997e25/ovnkube-controller/3.log" Nov 22 10:50:12 crc kubenswrapper[4926]: I1122 10:50:12.461710 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-z69nr_25bc94bb-a5d1-431c-9847-2f6a02997e25/ovn-acl-logging/0.log" Nov 22 10:50:12 crc kubenswrapper[4926]: I1122 10:50:12.462174 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-z69nr_25bc94bb-a5d1-431c-9847-2f6a02997e25/ovn-controller/0.log" Nov 22 10:50:12 crc kubenswrapper[4926]: I1122 10:50:12.462608 4926 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-z69nr" Nov 22 10:50:12 crc kubenswrapper[4926]: I1122 10:50:12.523320 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-rdpq5"] Nov 22 10:50:12 crc kubenswrapper[4926]: E1122 10:50:12.523520 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="25bc94bb-a5d1-431c-9847-2f6a02997e25" containerName="kube-rbac-proxy-ovn-metrics" Nov 22 10:50:12 crc kubenswrapper[4926]: I1122 10:50:12.523534 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="25bc94bb-a5d1-431c-9847-2f6a02997e25" containerName="kube-rbac-proxy-ovn-metrics" Nov 22 10:50:12 crc kubenswrapper[4926]: E1122 10:50:12.523546 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="25bc94bb-a5d1-431c-9847-2f6a02997e25" containerName="kubecfg-setup" Nov 22 10:50:12 crc kubenswrapper[4926]: I1122 10:50:12.523552 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="25bc94bb-a5d1-431c-9847-2f6a02997e25" containerName="kubecfg-setup" Nov 22 10:50:12 crc kubenswrapper[4926]: E1122 10:50:12.523561 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="25bc94bb-a5d1-431c-9847-2f6a02997e25" containerName="nbdb" Nov 22 10:50:12 crc kubenswrapper[4926]: I1122 10:50:12.523568 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="25bc94bb-a5d1-431c-9847-2f6a02997e25" containerName="nbdb" Nov 22 10:50:12 crc kubenswrapper[4926]: E1122 10:50:12.523576 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="25bc94bb-a5d1-431c-9847-2f6a02997e25" containerName="ovnkube-controller" Nov 22 10:50:12 crc kubenswrapper[4926]: I1122 10:50:12.523582 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="25bc94bb-a5d1-431c-9847-2f6a02997e25" containerName="ovnkube-controller" Nov 22 10:50:12 crc kubenswrapper[4926]: E1122 10:50:12.523589 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="25bc94bb-a5d1-431c-9847-2f6a02997e25" containerName="northd" Nov 22 10:50:12 crc kubenswrapper[4926]: I1122 10:50:12.523596 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="25bc94bb-a5d1-431c-9847-2f6a02997e25" containerName="northd" Nov 22 10:50:12 crc kubenswrapper[4926]: E1122 10:50:12.523603 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="25bc94bb-a5d1-431c-9847-2f6a02997e25" containerName="ovnkube-controller" Nov 22 10:50:12 crc kubenswrapper[4926]: I1122 10:50:12.523610 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="25bc94bb-a5d1-431c-9847-2f6a02997e25" containerName="ovnkube-controller" Nov 22 10:50:12 crc kubenswrapper[4926]: E1122 10:50:12.523620 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="25bc94bb-a5d1-431c-9847-2f6a02997e25" containerName="ovnkube-controller" Nov 22 10:50:12 crc kubenswrapper[4926]: I1122 10:50:12.523626 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="25bc94bb-a5d1-431c-9847-2f6a02997e25" containerName="ovnkube-controller" Nov 22 10:50:12 crc kubenswrapper[4926]: E1122 10:50:12.523634 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="25bc94bb-a5d1-431c-9847-2f6a02997e25" containerName="sbdb" Nov 22 10:50:12 crc kubenswrapper[4926]: I1122 10:50:12.523640 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="25bc94bb-a5d1-431c-9847-2f6a02997e25" containerName="sbdb" Nov 22 10:50:12 crc kubenswrapper[4926]: E1122 10:50:12.523655 4926 cpu_manager.go:410] "RemoveStaleState: removing container" 
podUID="25bc94bb-a5d1-431c-9847-2f6a02997e25" containerName="kube-rbac-proxy-node" Nov 22 10:50:12 crc kubenswrapper[4926]: I1122 10:50:12.523662 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="25bc94bb-a5d1-431c-9847-2f6a02997e25" containerName="kube-rbac-proxy-node" Nov 22 10:50:12 crc kubenswrapper[4926]: E1122 10:50:12.523673 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="25bc94bb-a5d1-431c-9847-2f6a02997e25" containerName="ovn-acl-logging" Nov 22 10:50:12 crc kubenswrapper[4926]: I1122 10:50:12.523679 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="25bc94bb-a5d1-431c-9847-2f6a02997e25" containerName="ovn-acl-logging" Nov 22 10:50:12 crc kubenswrapper[4926]: E1122 10:50:12.523687 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="25bc94bb-a5d1-431c-9847-2f6a02997e25" containerName="ovn-controller" Nov 22 10:50:12 crc kubenswrapper[4926]: I1122 10:50:12.523692 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="25bc94bb-a5d1-431c-9847-2f6a02997e25" containerName="ovn-controller" Nov 22 10:50:12 crc kubenswrapper[4926]: I1122 10:50:12.523797 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="25bc94bb-a5d1-431c-9847-2f6a02997e25" containerName="ovnkube-controller" Nov 22 10:50:12 crc kubenswrapper[4926]: I1122 10:50:12.523808 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="25bc94bb-a5d1-431c-9847-2f6a02997e25" containerName="sbdb" Nov 22 10:50:12 crc kubenswrapper[4926]: I1122 10:50:12.523819 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="25bc94bb-a5d1-431c-9847-2f6a02997e25" containerName="kube-rbac-proxy-ovn-metrics" Nov 22 10:50:12 crc kubenswrapper[4926]: I1122 10:50:12.523825 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="25bc94bb-a5d1-431c-9847-2f6a02997e25" containerName="northd" Nov 22 10:50:12 crc kubenswrapper[4926]: I1122 10:50:12.523833 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="25bc94bb-a5d1-431c-9847-2f6a02997e25" containerName="ovnkube-controller" Nov 22 10:50:12 crc kubenswrapper[4926]: I1122 10:50:12.523842 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="25bc94bb-a5d1-431c-9847-2f6a02997e25" containerName="ovnkube-controller" Nov 22 10:50:12 crc kubenswrapper[4926]: I1122 10:50:12.523850 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="25bc94bb-a5d1-431c-9847-2f6a02997e25" containerName="kube-rbac-proxy-node" Nov 22 10:50:12 crc kubenswrapper[4926]: I1122 10:50:12.523860 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="25bc94bb-a5d1-431c-9847-2f6a02997e25" containerName="ovnkube-controller" Nov 22 10:50:12 crc kubenswrapper[4926]: I1122 10:50:12.523870 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="25bc94bb-a5d1-431c-9847-2f6a02997e25" containerName="ovn-acl-logging" Nov 22 10:50:12 crc kubenswrapper[4926]: I1122 10:50:12.523879 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="25bc94bb-a5d1-431c-9847-2f6a02997e25" containerName="ovn-controller" Nov 22 10:50:12 crc kubenswrapper[4926]: I1122 10:50:12.523912 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="25bc94bb-a5d1-431c-9847-2f6a02997e25" containerName="ovnkube-controller" Nov 22 10:50:12 crc kubenswrapper[4926]: I1122 10:50:12.523922 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="25bc94bb-a5d1-431c-9847-2f6a02997e25" containerName="nbdb" Nov 22 10:50:12 crc kubenswrapper[4926]: E1122 
10:50:12.524033 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="25bc94bb-a5d1-431c-9847-2f6a02997e25" containerName="ovnkube-controller" Nov 22 10:50:12 crc kubenswrapper[4926]: I1122 10:50:12.524042 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="25bc94bb-a5d1-431c-9847-2f6a02997e25" containerName="ovnkube-controller" Nov 22 10:50:12 crc kubenswrapper[4926]: E1122 10:50:12.524050 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="25bc94bb-a5d1-431c-9847-2f6a02997e25" containerName="ovnkube-controller" Nov 22 10:50:12 crc kubenswrapper[4926]: I1122 10:50:12.524056 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="25bc94bb-a5d1-431c-9847-2f6a02997e25" containerName="ovnkube-controller" Nov 22 10:50:12 crc kubenswrapper[4926]: I1122 10:50:12.525629 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-rdpq5" Nov 22 10:50:12 crc kubenswrapper[4926]: I1122 10:50:12.533179 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/25bc94bb-a5d1-431c-9847-2f6a02997e25-ovn-node-metrics-cert\") pod \"25bc94bb-a5d1-431c-9847-2f6a02997e25\" (UID: \"25bc94bb-a5d1-431c-9847-2f6a02997e25\") " Nov 22 10:50:12 crc kubenswrapper[4926]: I1122 10:50:12.533224 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/25bc94bb-a5d1-431c-9847-2f6a02997e25-host-cni-bin\") pod \"25bc94bb-a5d1-431c-9847-2f6a02997e25\" (UID: \"25bc94bb-a5d1-431c-9847-2f6a02997e25\") " Nov 22 10:50:12 crc kubenswrapper[4926]: I1122 10:50:12.533245 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/25bc94bb-a5d1-431c-9847-2f6a02997e25-host-cni-netd\") pod \"25bc94bb-a5d1-431c-9847-2f6a02997e25\" (UID: \"25bc94bb-a5d1-431c-9847-2f6a02997e25\") " Nov 22 10:50:12 crc kubenswrapper[4926]: I1122 10:50:12.533269 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/25bc94bb-a5d1-431c-9847-2f6a02997e25-run-systemd\") pod \"25bc94bb-a5d1-431c-9847-2f6a02997e25\" (UID: \"25bc94bb-a5d1-431c-9847-2f6a02997e25\") " Nov 22 10:50:12 crc kubenswrapper[4926]: I1122 10:50:12.533305 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/25bc94bb-a5d1-431c-9847-2f6a02997e25-run-openvswitch\") pod \"25bc94bb-a5d1-431c-9847-2f6a02997e25\" (UID: \"25bc94bb-a5d1-431c-9847-2f6a02997e25\") " Nov 22 10:50:12 crc kubenswrapper[4926]: I1122 10:50:12.533321 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/25bc94bb-a5d1-431c-9847-2f6a02997e25-host-cni-bin" (OuterVolumeSpecName: "host-cni-bin") pod "25bc94bb-a5d1-431c-9847-2f6a02997e25" (UID: "25bc94bb-a5d1-431c-9847-2f6a02997e25"). InnerVolumeSpecName "host-cni-bin". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 22 10:50:12 crc kubenswrapper[4926]: I1122 10:50:12.533340 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/25bc94bb-a5d1-431c-9847-2f6a02997e25-host-slash\") pod \"25bc94bb-a5d1-431c-9847-2f6a02997e25\" (UID: \"25bc94bb-a5d1-431c-9847-2f6a02997e25\") " Nov 22 10:50:12 crc kubenswrapper[4926]: I1122 10:50:12.533358 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/25bc94bb-a5d1-431c-9847-2f6a02997e25-host-run-ovn-kubernetes\") pod \"25bc94bb-a5d1-431c-9847-2f6a02997e25\" (UID: \"25bc94bb-a5d1-431c-9847-2f6a02997e25\") " Nov 22 10:50:12 crc kubenswrapper[4926]: I1122 10:50:12.533417 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/25bc94bb-a5d1-431c-9847-2f6a02997e25-run-ovn\") pod \"25bc94bb-a5d1-431c-9847-2f6a02997e25\" (UID: \"25bc94bb-a5d1-431c-9847-2f6a02997e25\") " Nov 22 10:50:12 crc kubenswrapper[4926]: I1122 10:50:12.533448 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/25bc94bb-a5d1-431c-9847-2f6a02997e25-node-log\") pod \"25bc94bb-a5d1-431c-9847-2f6a02997e25\" (UID: \"25bc94bb-a5d1-431c-9847-2f6a02997e25\") " Nov 22 10:50:12 crc kubenswrapper[4926]: I1122 10:50:12.533443 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/25bc94bb-a5d1-431c-9847-2f6a02997e25-host-cni-netd" (OuterVolumeSpecName: "host-cni-netd") pod "25bc94bb-a5d1-431c-9847-2f6a02997e25" (UID: "25bc94bb-a5d1-431c-9847-2f6a02997e25"). InnerVolumeSpecName "host-cni-netd". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 22 10:50:12 crc kubenswrapper[4926]: I1122 10:50:12.533489 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/25bc94bb-a5d1-431c-9847-2f6a02997e25-host-kubelet\") pod \"25bc94bb-a5d1-431c-9847-2f6a02997e25\" (UID: \"25bc94bb-a5d1-431c-9847-2f6a02997e25\") " Nov 22 10:50:12 crc kubenswrapper[4926]: I1122 10:50:12.533531 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/25bc94bb-a5d1-431c-9847-2f6a02997e25-host-kubelet" (OuterVolumeSpecName: "host-kubelet") pod "25bc94bb-a5d1-431c-9847-2f6a02997e25" (UID: "25bc94bb-a5d1-431c-9847-2f6a02997e25"). InnerVolumeSpecName "host-kubelet". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 22 10:50:12 crc kubenswrapper[4926]: I1122 10:50:12.533540 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/25bc94bb-a5d1-431c-9847-2f6a02997e25-host-run-netns\") pod \"25bc94bb-a5d1-431c-9847-2f6a02997e25\" (UID: \"25bc94bb-a5d1-431c-9847-2f6a02997e25\") " Nov 22 10:50:12 crc kubenswrapper[4926]: I1122 10:50:12.533525 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/25bc94bb-a5d1-431c-9847-2f6a02997e25-run-openvswitch" (OuterVolumeSpecName: "run-openvswitch") pod "25bc94bb-a5d1-431c-9847-2f6a02997e25" (UID: "25bc94bb-a5d1-431c-9847-2f6a02997e25"). InnerVolumeSpecName "run-openvswitch". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 22 10:50:12 crc kubenswrapper[4926]: I1122 10:50:12.533564 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/25bc94bb-a5d1-431c-9847-2f6a02997e25-run-ovn" (OuterVolumeSpecName: "run-ovn") pod "25bc94bb-a5d1-431c-9847-2f6a02997e25" (UID: "25bc94bb-a5d1-431c-9847-2f6a02997e25"). InnerVolumeSpecName "run-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 22 10:50:12 crc kubenswrapper[4926]: I1122 10:50:12.533575 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/25bc94bb-a5d1-431c-9847-2f6a02997e25-host-slash" (OuterVolumeSpecName: "host-slash") pod "25bc94bb-a5d1-431c-9847-2f6a02997e25" (UID: "25bc94bb-a5d1-431c-9847-2f6a02997e25"). InnerVolumeSpecName "host-slash". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 22 10:50:12 crc kubenswrapper[4926]: I1122 10:50:12.533629 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x999w\" (UniqueName: \"kubernetes.io/projected/25bc94bb-a5d1-431c-9847-2f6a02997e25-kube-api-access-x999w\") pod \"25bc94bb-a5d1-431c-9847-2f6a02997e25\" (UID: \"25bc94bb-a5d1-431c-9847-2f6a02997e25\") " Nov 22 10:50:12 crc kubenswrapper[4926]: I1122 10:50:12.533788 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/25bc94bb-a5d1-431c-9847-2f6a02997e25-etc-openvswitch\") pod \"25bc94bb-a5d1-431c-9847-2f6a02997e25\" (UID: \"25bc94bb-a5d1-431c-9847-2f6a02997e25\") " Nov 22 10:50:12 crc kubenswrapper[4926]: I1122 10:50:12.533555 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/25bc94bb-a5d1-431c-9847-2f6a02997e25-host-run-ovn-kubernetes" (OuterVolumeSpecName: "host-run-ovn-kubernetes") pod "25bc94bb-a5d1-431c-9847-2f6a02997e25" (UID: "25bc94bb-a5d1-431c-9847-2f6a02997e25"). InnerVolumeSpecName "host-run-ovn-kubernetes". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 22 10:50:12 crc kubenswrapper[4926]: I1122 10:50:12.533584 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/25bc94bb-a5d1-431c-9847-2f6a02997e25-node-log" (OuterVolumeSpecName: "node-log") pod "25bc94bb-a5d1-431c-9847-2f6a02997e25" (UID: "25bc94bb-a5d1-431c-9847-2f6a02997e25"). InnerVolumeSpecName "node-log". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 22 10:50:12 crc kubenswrapper[4926]: I1122 10:50:12.533602 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/25bc94bb-a5d1-431c-9847-2f6a02997e25-host-run-netns" (OuterVolumeSpecName: "host-run-netns") pod "25bc94bb-a5d1-431c-9847-2f6a02997e25" (UID: "25bc94bb-a5d1-431c-9847-2f6a02997e25"). InnerVolumeSpecName "host-run-netns". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 22 10:50:12 crc kubenswrapper[4926]: I1122 10:50:12.533836 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/25bc94bb-a5d1-431c-9847-2f6a02997e25-ovnkube-script-lib\") pod \"25bc94bb-a5d1-431c-9847-2f6a02997e25\" (UID: \"25bc94bb-a5d1-431c-9847-2f6a02997e25\") " Nov 22 10:50:12 crc kubenswrapper[4926]: I1122 10:50:12.533859 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/25bc94bb-a5d1-431c-9847-2f6a02997e25-etc-openvswitch" (OuterVolumeSpecName: "etc-openvswitch") pod "25bc94bb-a5d1-431c-9847-2f6a02997e25" (UID: "25bc94bb-a5d1-431c-9847-2f6a02997e25"). InnerVolumeSpecName "etc-openvswitch". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 22 10:50:12 crc kubenswrapper[4926]: I1122 10:50:12.533878 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/25bc94bb-a5d1-431c-9847-2f6a02997e25-var-lib-openvswitch\") pod \"25bc94bb-a5d1-431c-9847-2f6a02997e25\" (UID: \"25bc94bb-a5d1-431c-9847-2f6a02997e25\") " Nov 22 10:50:12 crc kubenswrapper[4926]: I1122 10:50:12.533960 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/25bc94bb-a5d1-431c-9847-2f6a02997e25-ovnkube-config\") pod \"25bc94bb-a5d1-431c-9847-2f6a02997e25\" (UID: \"25bc94bb-a5d1-431c-9847-2f6a02997e25\") " Nov 22 10:50:12 crc kubenswrapper[4926]: I1122 10:50:12.533983 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/25bc94bb-a5d1-431c-9847-2f6a02997e25-var-lib-openvswitch" (OuterVolumeSpecName: "var-lib-openvswitch") pod "25bc94bb-a5d1-431c-9847-2f6a02997e25" (UID: "25bc94bb-a5d1-431c-9847-2f6a02997e25"). InnerVolumeSpecName "var-lib-openvswitch". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 22 10:50:12 crc kubenswrapper[4926]: I1122 10:50:12.534008 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/25bc94bb-a5d1-431c-9847-2f6a02997e25-systemd-units\") pod \"25bc94bb-a5d1-431c-9847-2f6a02997e25\" (UID: \"25bc94bb-a5d1-431c-9847-2f6a02997e25\") " Nov 22 10:50:12 crc kubenswrapper[4926]: I1122 10:50:12.534100 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/25bc94bb-a5d1-431c-9847-2f6a02997e25-host-var-lib-cni-networks-ovn-kubernetes\") pod \"25bc94bb-a5d1-431c-9847-2f6a02997e25\" (UID: \"25bc94bb-a5d1-431c-9847-2f6a02997e25\") " Nov 22 10:50:12 crc kubenswrapper[4926]: I1122 10:50:12.534172 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/25bc94bb-a5d1-431c-9847-2f6a02997e25-log-socket\") pod \"25bc94bb-a5d1-431c-9847-2f6a02997e25\" (UID: \"25bc94bb-a5d1-431c-9847-2f6a02997e25\") " Nov 22 10:50:12 crc kubenswrapper[4926]: I1122 10:50:12.534187 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/25bc94bb-a5d1-431c-9847-2f6a02997e25-systemd-units" (OuterVolumeSpecName: "systemd-units") pod "25bc94bb-a5d1-431c-9847-2f6a02997e25" (UID: "25bc94bb-a5d1-431c-9847-2f6a02997e25"). InnerVolumeSpecName "systemd-units". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 22 10:50:12 crc kubenswrapper[4926]: I1122 10:50:12.534224 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/25bc94bb-a5d1-431c-9847-2f6a02997e25-env-overrides\") pod \"25bc94bb-a5d1-431c-9847-2f6a02997e25\" (UID: \"25bc94bb-a5d1-431c-9847-2f6a02997e25\") " Nov 22 10:50:12 crc kubenswrapper[4926]: I1122 10:50:12.534313 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/25bc94bb-a5d1-431c-9847-2f6a02997e25-host-var-lib-cni-networks-ovn-kubernetes" (OuterVolumeSpecName: "host-var-lib-cni-networks-ovn-kubernetes") pod "25bc94bb-a5d1-431c-9847-2f6a02997e25" (UID: "25bc94bb-a5d1-431c-9847-2f6a02997e25"). InnerVolumeSpecName "host-var-lib-cni-networks-ovn-kubernetes". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 22 10:50:12 crc kubenswrapper[4926]: I1122 10:50:12.534361 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/25bc94bb-a5d1-431c-9847-2f6a02997e25-log-socket" (OuterVolumeSpecName: "log-socket") pod "25bc94bb-a5d1-431c-9847-2f6a02997e25" (UID: "25bc94bb-a5d1-431c-9847-2f6a02997e25"). InnerVolumeSpecName "log-socket". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 22 10:50:12 crc kubenswrapper[4926]: I1122 10:50:12.534421 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/25bc94bb-a5d1-431c-9847-2f6a02997e25-ovnkube-config" (OuterVolumeSpecName: "ovnkube-config") pod "25bc94bb-a5d1-431c-9847-2f6a02997e25" (UID: "25bc94bb-a5d1-431c-9847-2f6a02997e25"). InnerVolumeSpecName "ovnkube-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:50:12 crc kubenswrapper[4926]: I1122 10:50:12.534591 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/25bc94bb-a5d1-431c-9847-2f6a02997e25-ovnkube-script-lib" (OuterVolumeSpecName: "ovnkube-script-lib") pod "25bc94bb-a5d1-431c-9847-2f6a02997e25" (UID: "25bc94bb-a5d1-431c-9847-2f6a02997e25"). InnerVolumeSpecName "ovnkube-script-lib". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:50:12 crc kubenswrapper[4926]: I1122 10:50:12.534927 4926 reconciler_common.go:293] "Volume detached for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/25bc94bb-a5d1-431c-9847-2f6a02997e25-etc-openvswitch\") on node \"crc\" DevicePath \"\"" Nov 22 10:50:12 crc kubenswrapper[4926]: I1122 10:50:12.535013 4926 reconciler_common.go:293] "Volume detached for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/25bc94bb-a5d1-431c-9847-2f6a02997e25-ovnkube-script-lib\") on node \"crc\" DevicePath \"\"" Nov 22 10:50:12 crc kubenswrapper[4926]: I1122 10:50:12.535400 4926 reconciler_common.go:293] "Volume detached for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/25bc94bb-a5d1-431c-9847-2f6a02997e25-var-lib-openvswitch\") on node \"crc\" DevicePath \"\"" Nov 22 10:50:12 crc kubenswrapper[4926]: I1122 10:50:12.535471 4926 reconciler_common.go:293] "Volume detached for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/25bc94bb-a5d1-431c-9847-2f6a02997e25-ovnkube-config\") on node \"crc\" DevicePath \"\"" Nov 22 10:50:12 crc kubenswrapper[4926]: I1122 10:50:12.535488 4926 reconciler_common.go:293] "Volume detached for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/25bc94bb-a5d1-431c-9847-2f6a02997e25-systemd-units\") on node \"crc\" DevicePath \"\"" Nov 22 10:50:12 crc kubenswrapper[4926]: I1122 10:50:12.535620 4926 reconciler_common.go:293] "Volume detached for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/25bc94bb-a5d1-431c-9847-2f6a02997e25-host-var-lib-cni-networks-ovn-kubernetes\") on node \"crc\" DevicePath \"\"" Nov 22 10:50:12 crc kubenswrapper[4926]: I1122 10:50:12.535654 4926 reconciler_common.go:293] "Volume detached for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/25bc94bb-a5d1-431c-9847-2f6a02997e25-log-socket\") on node \"crc\" DevicePath \"\"" Nov 22 10:50:12 crc kubenswrapper[4926]: I1122 10:50:12.535691 4926 reconciler_common.go:293] "Volume detached for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/25bc94bb-a5d1-431c-9847-2f6a02997e25-host-cni-bin\") on node \"crc\" DevicePath \"\"" Nov 22 10:50:12 crc kubenswrapper[4926]: I1122 10:50:12.535710 4926 reconciler_common.go:293] "Volume detached for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/25bc94bb-a5d1-431c-9847-2f6a02997e25-host-cni-netd\") on node \"crc\" DevicePath \"\"" Nov 22 10:50:12 crc kubenswrapper[4926]: I1122 10:50:12.535726 4926 reconciler_common.go:293] "Volume detached for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/25bc94bb-a5d1-431c-9847-2f6a02997e25-run-openvswitch\") on node \"crc\" DevicePath \"\"" Nov 22 10:50:12 crc kubenswrapper[4926]: I1122 10:50:12.535742 4926 reconciler_common.go:293] "Volume detached for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/25bc94bb-a5d1-431c-9847-2f6a02997e25-host-slash\") on node \"crc\" DevicePath \"\"" Nov 22 10:50:12 crc kubenswrapper[4926]: I1122 10:50:12.535758 4926 reconciler_common.go:293] "Volume detached for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/25bc94bb-a5d1-431c-9847-2f6a02997e25-host-run-ovn-kubernetes\") on node \"crc\" DevicePath \"\"" Nov 22 10:50:12 crc kubenswrapper[4926]: I1122 10:50:12.535775 4926 reconciler_common.go:293] "Volume detached for volume \"run-ovn\" (UniqueName: 
\"kubernetes.io/host-path/25bc94bb-a5d1-431c-9847-2f6a02997e25-run-ovn\") on node \"crc\" DevicePath \"\"" Nov 22 10:50:12 crc kubenswrapper[4926]: I1122 10:50:12.535791 4926 reconciler_common.go:293] "Volume detached for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/25bc94bb-a5d1-431c-9847-2f6a02997e25-node-log\") on node \"crc\" DevicePath \"\"" Nov 22 10:50:12 crc kubenswrapper[4926]: I1122 10:50:12.535068 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/25bc94bb-a5d1-431c-9847-2f6a02997e25-env-overrides" (OuterVolumeSpecName: "env-overrides") pod "25bc94bb-a5d1-431c-9847-2f6a02997e25" (UID: "25bc94bb-a5d1-431c-9847-2f6a02997e25"). InnerVolumeSpecName "env-overrides". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:50:12 crc kubenswrapper[4926]: I1122 10:50:12.535806 4926 reconciler_common.go:293] "Volume detached for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/25bc94bb-a5d1-431c-9847-2f6a02997e25-host-kubelet\") on node \"crc\" DevicePath \"\"" Nov 22 10:50:12 crc kubenswrapper[4926]: I1122 10:50:12.535866 4926 reconciler_common.go:293] "Volume detached for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/25bc94bb-a5d1-431c-9847-2f6a02997e25-host-run-netns\") on node \"crc\" DevicePath \"\"" Nov 22 10:50:12 crc kubenswrapper[4926]: I1122 10:50:12.541330 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/25bc94bb-a5d1-431c-9847-2f6a02997e25-ovn-node-metrics-cert" (OuterVolumeSpecName: "ovn-node-metrics-cert") pod "25bc94bb-a5d1-431c-9847-2f6a02997e25" (UID: "25bc94bb-a5d1-431c-9847-2f6a02997e25"). InnerVolumeSpecName "ovn-node-metrics-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:50:12 crc kubenswrapper[4926]: I1122 10:50:12.541603 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/25bc94bb-a5d1-431c-9847-2f6a02997e25-kube-api-access-x999w" (OuterVolumeSpecName: "kube-api-access-x999w") pod "25bc94bb-a5d1-431c-9847-2f6a02997e25" (UID: "25bc94bb-a5d1-431c-9847-2f6a02997e25"). InnerVolumeSpecName "kube-api-access-x999w". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:50:12 crc kubenswrapper[4926]: I1122 10:50:12.561609 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/25bc94bb-a5d1-431c-9847-2f6a02997e25-run-systemd" (OuterVolumeSpecName: "run-systemd") pod "25bc94bb-a5d1-431c-9847-2f6a02997e25" (UID: "25bc94bb-a5d1-431c-9847-2f6a02997e25"). InnerVolumeSpecName "run-systemd". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 22 10:50:12 crc kubenswrapper[4926]: I1122 10:50:12.636660 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/b85a9da2-2c6e-4b71-870e-5d64d6832f37-run-ovn\") pod \"ovnkube-node-rdpq5\" (UID: \"b85a9da2-2c6e-4b71-870e-5d64d6832f37\") " pod="openshift-ovn-kubernetes/ovnkube-node-rdpq5" Nov 22 10:50:12 crc kubenswrapper[4926]: I1122 10:50:12.636871 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/b85a9da2-2c6e-4b71-870e-5d64d6832f37-log-socket\") pod \"ovnkube-node-rdpq5\" (UID: \"b85a9da2-2c6e-4b71-870e-5d64d6832f37\") " pod="openshift-ovn-kubernetes/ovnkube-node-rdpq5" Nov 22 10:50:12 crc kubenswrapper[4926]: I1122 10:50:12.637075 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/b85a9da2-2c6e-4b71-870e-5d64d6832f37-etc-openvswitch\") pod \"ovnkube-node-rdpq5\" (UID: \"b85a9da2-2c6e-4b71-870e-5d64d6832f37\") " pod="openshift-ovn-kubernetes/ovnkube-node-rdpq5" Nov 22 10:50:12 crc kubenswrapper[4926]: I1122 10:50:12.637198 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/b85a9da2-2c6e-4b71-870e-5d64d6832f37-var-lib-openvswitch\") pod \"ovnkube-node-rdpq5\" (UID: \"b85a9da2-2c6e-4b71-870e-5d64d6832f37\") " pod="openshift-ovn-kubernetes/ovnkube-node-rdpq5" Nov 22 10:50:12 crc kubenswrapper[4926]: I1122 10:50:12.637319 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/b85a9da2-2c6e-4b71-870e-5d64d6832f37-ovnkube-config\") pod \"ovnkube-node-rdpq5\" (UID: \"b85a9da2-2c6e-4b71-870e-5d64d6832f37\") " pod="openshift-ovn-kubernetes/ovnkube-node-rdpq5" Nov 22 10:50:12 crc kubenswrapper[4926]: I1122 10:50:12.637425 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/b85a9da2-2c6e-4b71-870e-5d64d6832f37-env-overrides\") pod \"ovnkube-node-rdpq5\" (UID: \"b85a9da2-2c6e-4b71-870e-5d64d6832f37\") " pod="openshift-ovn-kubernetes/ovnkube-node-rdpq5" Nov 22 10:50:12 crc kubenswrapper[4926]: I1122 10:50:12.637523 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/b85a9da2-2c6e-4b71-870e-5d64d6832f37-host-run-netns\") pod \"ovnkube-node-rdpq5\" (UID: \"b85a9da2-2c6e-4b71-870e-5d64d6832f37\") " pod="openshift-ovn-kubernetes/ovnkube-node-rdpq5" Nov 22 10:50:12 crc kubenswrapper[4926]: I1122 10:50:12.637624 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/b85a9da2-2c6e-4b71-870e-5d64d6832f37-run-openvswitch\") pod \"ovnkube-node-rdpq5\" (UID: \"b85a9da2-2c6e-4b71-870e-5d64d6832f37\") " pod="openshift-ovn-kubernetes/ovnkube-node-rdpq5" Nov 22 10:50:12 crc kubenswrapper[4926]: I1122 10:50:12.637733 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9h5wb\" (UniqueName: 
\"kubernetes.io/projected/b85a9da2-2c6e-4b71-870e-5d64d6832f37-kube-api-access-9h5wb\") pod \"ovnkube-node-rdpq5\" (UID: \"b85a9da2-2c6e-4b71-870e-5d64d6832f37\") " pod="openshift-ovn-kubernetes/ovnkube-node-rdpq5" Nov 22 10:50:12 crc kubenswrapper[4926]: I1122 10:50:12.637951 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/b85a9da2-2c6e-4b71-870e-5d64d6832f37-host-cni-netd\") pod \"ovnkube-node-rdpq5\" (UID: \"b85a9da2-2c6e-4b71-870e-5d64d6832f37\") " pod="openshift-ovn-kubernetes/ovnkube-node-rdpq5" Nov 22 10:50:12 crc kubenswrapper[4926]: I1122 10:50:12.638045 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/b85a9da2-2c6e-4b71-870e-5d64d6832f37-node-log\") pod \"ovnkube-node-rdpq5\" (UID: \"b85a9da2-2c6e-4b71-870e-5d64d6832f37\") " pod="openshift-ovn-kubernetes/ovnkube-node-rdpq5" Nov 22 10:50:12 crc kubenswrapper[4926]: I1122 10:50:12.638077 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/b85a9da2-2c6e-4b71-870e-5d64d6832f37-ovnkube-script-lib\") pod \"ovnkube-node-rdpq5\" (UID: \"b85a9da2-2c6e-4b71-870e-5d64d6832f37\") " pod="openshift-ovn-kubernetes/ovnkube-node-rdpq5" Nov 22 10:50:12 crc kubenswrapper[4926]: I1122 10:50:12.638144 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/b85a9da2-2c6e-4b71-870e-5d64d6832f37-host-cni-bin\") pod \"ovnkube-node-rdpq5\" (UID: \"b85a9da2-2c6e-4b71-870e-5d64d6832f37\") " pod="openshift-ovn-kubernetes/ovnkube-node-rdpq5" Nov 22 10:50:12 crc kubenswrapper[4926]: I1122 10:50:12.638166 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/b85a9da2-2c6e-4b71-870e-5d64d6832f37-ovn-node-metrics-cert\") pod \"ovnkube-node-rdpq5\" (UID: \"b85a9da2-2c6e-4b71-870e-5d64d6832f37\") " pod="openshift-ovn-kubernetes/ovnkube-node-rdpq5" Nov 22 10:50:12 crc kubenswrapper[4926]: I1122 10:50:12.638202 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/b85a9da2-2c6e-4b71-870e-5d64d6832f37-run-systemd\") pod \"ovnkube-node-rdpq5\" (UID: \"b85a9da2-2c6e-4b71-870e-5d64d6832f37\") " pod="openshift-ovn-kubernetes/ovnkube-node-rdpq5" Nov 22 10:50:12 crc kubenswrapper[4926]: I1122 10:50:12.638241 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/b85a9da2-2c6e-4b71-870e-5d64d6832f37-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-rdpq5\" (UID: \"b85a9da2-2c6e-4b71-870e-5d64d6832f37\") " pod="openshift-ovn-kubernetes/ovnkube-node-rdpq5" Nov 22 10:50:12 crc kubenswrapper[4926]: I1122 10:50:12.638272 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/b85a9da2-2c6e-4b71-870e-5d64d6832f37-host-slash\") pod \"ovnkube-node-rdpq5\" (UID: \"b85a9da2-2c6e-4b71-870e-5d64d6832f37\") " pod="openshift-ovn-kubernetes/ovnkube-node-rdpq5" Nov 22 10:50:12 crc kubenswrapper[4926]: I1122 10:50:12.638435 4926 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/b85a9da2-2c6e-4b71-870e-5d64d6832f37-systemd-units\") pod \"ovnkube-node-rdpq5\" (UID: \"b85a9da2-2c6e-4b71-870e-5d64d6832f37\") " pod="openshift-ovn-kubernetes/ovnkube-node-rdpq5" Nov 22 10:50:12 crc kubenswrapper[4926]: I1122 10:50:12.638490 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/b85a9da2-2c6e-4b71-870e-5d64d6832f37-host-run-ovn-kubernetes\") pod \"ovnkube-node-rdpq5\" (UID: \"b85a9da2-2c6e-4b71-870e-5d64d6832f37\") " pod="openshift-ovn-kubernetes/ovnkube-node-rdpq5" Nov 22 10:50:12 crc kubenswrapper[4926]: I1122 10:50:12.638534 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/b85a9da2-2c6e-4b71-870e-5d64d6832f37-host-kubelet\") pod \"ovnkube-node-rdpq5\" (UID: \"b85a9da2-2c6e-4b71-870e-5d64d6832f37\") " pod="openshift-ovn-kubernetes/ovnkube-node-rdpq5" Nov 22 10:50:12 crc kubenswrapper[4926]: I1122 10:50:12.638692 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x999w\" (UniqueName: \"kubernetes.io/projected/25bc94bb-a5d1-431c-9847-2f6a02997e25-kube-api-access-x999w\") on node \"crc\" DevicePath \"\"" Nov 22 10:50:12 crc kubenswrapper[4926]: I1122 10:50:12.638717 4926 reconciler_common.go:293] "Volume detached for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/25bc94bb-a5d1-431c-9847-2f6a02997e25-env-overrides\") on node \"crc\" DevicePath \"\"" Nov 22 10:50:12 crc kubenswrapper[4926]: I1122 10:50:12.638729 4926 reconciler_common.go:293] "Volume detached for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/25bc94bb-a5d1-431c-9847-2f6a02997e25-ovn-node-metrics-cert\") on node \"crc\" DevicePath \"\"" Nov 22 10:50:12 crc kubenswrapper[4926]: I1122 10:50:12.638742 4926 reconciler_common.go:293] "Volume detached for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/25bc94bb-a5d1-431c-9847-2f6a02997e25-run-systemd\") on node \"crc\" DevicePath \"\"" Nov 22 10:50:12 crc kubenswrapper[4926]: I1122 10:50:12.740508 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/b85a9da2-2c6e-4b71-870e-5d64d6832f37-host-kubelet\") pod \"ovnkube-node-rdpq5\" (UID: \"b85a9da2-2c6e-4b71-870e-5d64d6832f37\") " pod="openshift-ovn-kubernetes/ovnkube-node-rdpq5" Nov 22 10:50:12 crc kubenswrapper[4926]: I1122 10:50:12.740668 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/b85a9da2-2c6e-4b71-870e-5d64d6832f37-run-ovn\") pod \"ovnkube-node-rdpq5\" (UID: \"b85a9da2-2c6e-4b71-870e-5d64d6832f37\") " pod="openshift-ovn-kubernetes/ovnkube-node-rdpq5" Nov 22 10:50:12 crc kubenswrapper[4926]: I1122 10:50:12.740747 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/b85a9da2-2c6e-4b71-870e-5d64d6832f37-run-ovn\") pod \"ovnkube-node-rdpq5\" (UID: \"b85a9da2-2c6e-4b71-870e-5d64d6832f37\") " pod="openshift-ovn-kubernetes/ovnkube-node-rdpq5" Nov 22 10:50:12 crc kubenswrapper[4926]: I1122 10:50:12.740608 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-kubelet\" (UniqueName: 
\"kubernetes.io/host-path/b85a9da2-2c6e-4b71-870e-5d64d6832f37-host-kubelet\") pod \"ovnkube-node-rdpq5\" (UID: \"b85a9da2-2c6e-4b71-870e-5d64d6832f37\") " pod="openshift-ovn-kubernetes/ovnkube-node-rdpq5" Nov 22 10:50:12 crc kubenswrapper[4926]: I1122 10:50:12.740802 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/b85a9da2-2c6e-4b71-870e-5d64d6832f37-log-socket\") pod \"ovnkube-node-rdpq5\" (UID: \"b85a9da2-2c6e-4b71-870e-5d64d6832f37\") " pod="openshift-ovn-kubernetes/ovnkube-node-rdpq5" Nov 22 10:50:12 crc kubenswrapper[4926]: I1122 10:50:12.740828 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/b85a9da2-2c6e-4b71-870e-5d64d6832f37-etc-openvswitch\") pod \"ovnkube-node-rdpq5\" (UID: \"b85a9da2-2c6e-4b71-870e-5d64d6832f37\") " pod="openshift-ovn-kubernetes/ovnkube-node-rdpq5" Nov 22 10:50:12 crc kubenswrapper[4926]: I1122 10:50:12.740872 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/b85a9da2-2c6e-4b71-870e-5d64d6832f37-var-lib-openvswitch\") pod \"ovnkube-node-rdpq5\" (UID: \"b85a9da2-2c6e-4b71-870e-5d64d6832f37\") " pod="openshift-ovn-kubernetes/ovnkube-node-rdpq5" Nov 22 10:50:12 crc kubenswrapper[4926]: I1122 10:50:12.740923 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/b85a9da2-2c6e-4b71-870e-5d64d6832f37-etc-openvswitch\") pod \"ovnkube-node-rdpq5\" (UID: \"b85a9da2-2c6e-4b71-870e-5d64d6832f37\") " pod="openshift-ovn-kubernetes/ovnkube-node-rdpq5" Nov 22 10:50:12 crc kubenswrapper[4926]: I1122 10:50:12.740921 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/b85a9da2-2c6e-4b71-870e-5d64d6832f37-log-socket\") pod \"ovnkube-node-rdpq5\" (UID: \"b85a9da2-2c6e-4b71-870e-5d64d6832f37\") " pod="openshift-ovn-kubernetes/ovnkube-node-rdpq5" Nov 22 10:50:12 crc kubenswrapper[4926]: I1122 10:50:12.740988 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/b85a9da2-2c6e-4b71-870e-5d64d6832f37-var-lib-openvswitch\") pod \"ovnkube-node-rdpq5\" (UID: \"b85a9da2-2c6e-4b71-870e-5d64d6832f37\") " pod="openshift-ovn-kubernetes/ovnkube-node-rdpq5" Nov 22 10:50:12 crc kubenswrapper[4926]: I1122 10:50:12.741053 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/b85a9da2-2c6e-4b71-870e-5d64d6832f37-ovnkube-config\") pod \"ovnkube-node-rdpq5\" (UID: \"b85a9da2-2c6e-4b71-870e-5d64d6832f37\") " pod="openshift-ovn-kubernetes/ovnkube-node-rdpq5" Nov 22 10:50:12 crc kubenswrapper[4926]: I1122 10:50:12.741850 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/b85a9da2-2c6e-4b71-870e-5d64d6832f37-ovnkube-config\") pod \"ovnkube-node-rdpq5\" (UID: \"b85a9da2-2c6e-4b71-870e-5d64d6832f37\") " pod="openshift-ovn-kubernetes/ovnkube-node-rdpq5" Nov 22 10:50:12 crc kubenswrapper[4926]: I1122 10:50:12.742301 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/b85a9da2-2c6e-4b71-870e-5d64d6832f37-env-overrides\") pod \"ovnkube-node-rdpq5\" (UID: 
\"b85a9da2-2c6e-4b71-870e-5d64d6832f37\") " pod="openshift-ovn-kubernetes/ovnkube-node-rdpq5" Nov 22 10:50:12 crc kubenswrapper[4926]: I1122 10:50:12.741075 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/b85a9da2-2c6e-4b71-870e-5d64d6832f37-env-overrides\") pod \"ovnkube-node-rdpq5\" (UID: \"b85a9da2-2c6e-4b71-870e-5d64d6832f37\") " pod="openshift-ovn-kubernetes/ovnkube-node-rdpq5" Nov 22 10:50:12 crc kubenswrapper[4926]: I1122 10:50:12.742425 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/b85a9da2-2c6e-4b71-870e-5d64d6832f37-host-run-netns\") pod \"ovnkube-node-rdpq5\" (UID: \"b85a9da2-2c6e-4b71-870e-5d64d6832f37\") " pod="openshift-ovn-kubernetes/ovnkube-node-rdpq5" Nov 22 10:50:12 crc kubenswrapper[4926]: I1122 10:50:12.742505 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/b85a9da2-2c6e-4b71-870e-5d64d6832f37-host-run-netns\") pod \"ovnkube-node-rdpq5\" (UID: \"b85a9da2-2c6e-4b71-870e-5d64d6832f37\") " pod="openshift-ovn-kubernetes/ovnkube-node-rdpq5" Nov 22 10:50:12 crc kubenswrapper[4926]: I1122 10:50:12.742460 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/b85a9da2-2c6e-4b71-870e-5d64d6832f37-run-openvswitch\") pod \"ovnkube-node-rdpq5\" (UID: \"b85a9da2-2c6e-4b71-870e-5d64d6832f37\") " pod="openshift-ovn-kubernetes/ovnkube-node-rdpq5" Nov 22 10:50:12 crc kubenswrapper[4926]: I1122 10:50:12.742572 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9h5wb\" (UniqueName: \"kubernetes.io/projected/b85a9da2-2c6e-4b71-870e-5d64d6832f37-kube-api-access-9h5wb\") pod \"ovnkube-node-rdpq5\" (UID: \"b85a9da2-2c6e-4b71-870e-5d64d6832f37\") " pod="openshift-ovn-kubernetes/ovnkube-node-rdpq5" Nov 22 10:50:12 crc kubenswrapper[4926]: I1122 10:50:12.743003 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/b85a9da2-2c6e-4b71-870e-5d64d6832f37-host-cni-netd\") pod \"ovnkube-node-rdpq5\" (UID: \"b85a9da2-2c6e-4b71-870e-5d64d6832f37\") " pod="openshift-ovn-kubernetes/ovnkube-node-rdpq5" Nov 22 10:50:12 crc kubenswrapper[4926]: I1122 10:50:12.743115 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/b85a9da2-2c6e-4b71-870e-5d64d6832f37-node-log\") pod \"ovnkube-node-rdpq5\" (UID: \"b85a9da2-2c6e-4b71-870e-5d64d6832f37\") " pod="openshift-ovn-kubernetes/ovnkube-node-rdpq5" Nov 22 10:50:12 crc kubenswrapper[4926]: I1122 10:50:12.743190 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/b85a9da2-2c6e-4b71-870e-5d64d6832f37-node-log\") pod \"ovnkube-node-rdpq5\" (UID: \"b85a9da2-2c6e-4b71-870e-5d64d6832f37\") " pod="openshift-ovn-kubernetes/ovnkube-node-rdpq5" Nov 22 10:50:12 crc kubenswrapper[4926]: I1122 10:50:12.742615 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/b85a9da2-2c6e-4b71-870e-5d64d6832f37-run-openvswitch\") pod \"ovnkube-node-rdpq5\" (UID: \"b85a9da2-2c6e-4b71-870e-5d64d6832f37\") " pod="openshift-ovn-kubernetes/ovnkube-node-rdpq5" Nov 22 10:50:12 crc 
kubenswrapper[4926]: I1122 10:50:12.743065 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/b85a9da2-2c6e-4b71-870e-5d64d6832f37-host-cni-netd\") pod \"ovnkube-node-rdpq5\" (UID: \"b85a9da2-2c6e-4b71-870e-5d64d6832f37\") " pod="openshift-ovn-kubernetes/ovnkube-node-rdpq5" Nov 22 10:50:12 crc kubenswrapper[4926]: I1122 10:50:12.743253 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/b85a9da2-2c6e-4b71-870e-5d64d6832f37-ovnkube-script-lib\") pod \"ovnkube-node-rdpq5\" (UID: \"b85a9da2-2c6e-4b71-870e-5d64d6832f37\") " pod="openshift-ovn-kubernetes/ovnkube-node-rdpq5" Nov 22 10:50:12 crc kubenswrapper[4926]: I1122 10:50:12.743933 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/b85a9da2-2c6e-4b71-870e-5d64d6832f37-ovnkube-script-lib\") pod \"ovnkube-node-rdpq5\" (UID: \"b85a9da2-2c6e-4b71-870e-5d64d6832f37\") " pod="openshift-ovn-kubernetes/ovnkube-node-rdpq5" Nov 22 10:50:12 crc kubenswrapper[4926]: I1122 10:50:12.743285 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/b85a9da2-2c6e-4b71-870e-5d64d6832f37-host-cni-bin\") pod \"ovnkube-node-rdpq5\" (UID: \"b85a9da2-2c6e-4b71-870e-5d64d6832f37\") " pod="openshift-ovn-kubernetes/ovnkube-node-rdpq5" Nov 22 10:50:12 crc kubenswrapper[4926]: I1122 10:50:12.744005 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/b85a9da2-2c6e-4b71-870e-5d64d6832f37-ovn-node-metrics-cert\") pod \"ovnkube-node-rdpq5\" (UID: \"b85a9da2-2c6e-4b71-870e-5d64d6832f37\") " pod="openshift-ovn-kubernetes/ovnkube-node-rdpq5" Nov 22 10:50:12 crc kubenswrapper[4926]: I1122 10:50:12.744057 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/b85a9da2-2c6e-4b71-870e-5d64d6832f37-host-cni-bin\") pod \"ovnkube-node-rdpq5\" (UID: \"b85a9da2-2c6e-4b71-870e-5d64d6832f37\") " pod="openshift-ovn-kubernetes/ovnkube-node-rdpq5" Nov 22 10:50:12 crc kubenswrapper[4926]: I1122 10:50:12.744554 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/b85a9da2-2c6e-4b71-870e-5d64d6832f37-run-systemd\") pod \"ovnkube-node-rdpq5\" (UID: \"b85a9da2-2c6e-4b71-870e-5d64d6832f37\") " pod="openshift-ovn-kubernetes/ovnkube-node-rdpq5" Nov 22 10:50:12 crc kubenswrapper[4926]: I1122 10:50:12.744631 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/b85a9da2-2c6e-4b71-870e-5d64d6832f37-run-systemd\") pod \"ovnkube-node-rdpq5\" (UID: \"b85a9da2-2c6e-4b71-870e-5d64d6832f37\") " pod="openshift-ovn-kubernetes/ovnkube-node-rdpq5" Nov 22 10:50:12 crc kubenswrapper[4926]: I1122 10:50:12.744660 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/b85a9da2-2c6e-4b71-870e-5d64d6832f37-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-rdpq5\" (UID: \"b85a9da2-2c6e-4b71-870e-5d64d6832f37\") " pod="openshift-ovn-kubernetes/ovnkube-node-rdpq5" Nov 22 10:50:12 crc kubenswrapper[4926]: I1122 10:50:12.744685 4926 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/b85a9da2-2c6e-4b71-870e-5d64d6832f37-host-slash\") pod \"ovnkube-node-rdpq5\" (UID: \"b85a9da2-2c6e-4b71-870e-5d64d6832f37\") " pod="openshift-ovn-kubernetes/ovnkube-node-rdpq5" Nov 22 10:50:12 crc kubenswrapper[4926]: I1122 10:50:12.744751 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/b85a9da2-2c6e-4b71-870e-5d64d6832f37-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-rdpq5\" (UID: \"b85a9da2-2c6e-4b71-870e-5d64d6832f37\") " pod="openshift-ovn-kubernetes/ovnkube-node-rdpq5" Nov 22 10:50:12 crc kubenswrapper[4926]: I1122 10:50:12.744846 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/b85a9da2-2c6e-4b71-870e-5d64d6832f37-systemd-units\") pod \"ovnkube-node-rdpq5\" (UID: \"b85a9da2-2c6e-4b71-870e-5d64d6832f37\") " pod="openshift-ovn-kubernetes/ovnkube-node-rdpq5" Nov 22 10:50:12 crc kubenswrapper[4926]: I1122 10:50:12.744875 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/b85a9da2-2c6e-4b71-870e-5d64d6832f37-host-run-ovn-kubernetes\") pod \"ovnkube-node-rdpq5\" (UID: \"b85a9da2-2c6e-4b71-870e-5d64d6832f37\") " pod="openshift-ovn-kubernetes/ovnkube-node-rdpq5" Nov 22 10:50:12 crc kubenswrapper[4926]: I1122 10:50:12.744944 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/b85a9da2-2c6e-4b71-870e-5d64d6832f37-host-run-ovn-kubernetes\") pod \"ovnkube-node-rdpq5\" (UID: \"b85a9da2-2c6e-4b71-870e-5d64d6832f37\") " pod="openshift-ovn-kubernetes/ovnkube-node-rdpq5" Nov 22 10:50:12 crc kubenswrapper[4926]: I1122 10:50:12.744970 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/b85a9da2-2c6e-4b71-870e-5d64d6832f37-host-slash\") pod \"ovnkube-node-rdpq5\" (UID: \"b85a9da2-2c6e-4b71-870e-5d64d6832f37\") " pod="openshift-ovn-kubernetes/ovnkube-node-rdpq5" Nov 22 10:50:12 crc kubenswrapper[4926]: I1122 10:50:12.745001 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/b85a9da2-2c6e-4b71-870e-5d64d6832f37-systemd-units\") pod \"ovnkube-node-rdpq5\" (UID: \"b85a9da2-2c6e-4b71-870e-5d64d6832f37\") " pod="openshift-ovn-kubernetes/ovnkube-node-rdpq5" Nov 22 10:50:12 crc kubenswrapper[4926]: I1122 10:50:12.747571 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/b85a9da2-2c6e-4b71-870e-5d64d6832f37-ovn-node-metrics-cert\") pod \"ovnkube-node-rdpq5\" (UID: \"b85a9da2-2c6e-4b71-870e-5d64d6832f37\") " pod="openshift-ovn-kubernetes/ovnkube-node-rdpq5" Nov 22 10:50:12 crc kubenswrapper[4926]: I1122 10:50:12.759078 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9h5wb\" (UniqueName: \"kubernetes.io/projected/b85a9da2-2c6e-4b71-870e-5d64d6832f37-kube-api-access-9h5wb\") pod \"ovnkube-node-rdpq5\" (UID: \"b85a9da2-2c6e-4b71-870e-5d64d6832f37\") " pod="openshift-ovn-kubernetes/ovnkube-node-rdpq5" Nov 22 10:50:12 crc kubenswrapper[4926]: I1122 10:50:12.845576 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-rdpq5" Nov 22 10:50:12 crc kubenswrapper[4926]: W1122 10:50:12.876545 4926 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb85a9da2_2c6e_4b71_870e_5d64d6832f37.slice/crio-4d144c35b5182596c5f35246a5641f56ffb568a986f85779a4bdaacfac7f116c WatchSource:0}: Error finding container 4d144c35b5182596c5f35246a5641f56ffb568a986f85779a4bdaacfac7f116c: Status 404 returned error can't find the container with id 4d144c35b5182596c5f35246a5641f56ffb568a986f85779a4bdaacfac7f116c Nov 22 10:50:12 crc kubenswrapper[4926]: I1122 10:50:12.905760 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-rdpq5" event={"ID":"b85a9da2-2c6e-4b71-870e-5d64d6832f37","Type":"ContainerStarted","Data":"4d144c35b5182596c5f35246a5641f56ffb568a986f85779a4bdaacfac7f116c"} Nov 22 10:50:12 crc kubenswrapper[4926]: I1122 10:50:12.908200 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-c6w2q_36de2843-6491-4c54-b624-c4a3d328c164/kube-multus/2.log" Nov 22 10:50:12 crc kubenswrapper[4926]: I1122 10:50:12.909048 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-c6w2q_36de2843-6491-4c54-b624-c4a3d328c164/kube-multus/1.log" Nov 22 10:50:12 crc kubenswrapper[4926]: I1122 10:50:12.909125 4926 generic.go:334] "Generic (PLEG): container finished" podID="36de2843-6491-4c54-b624-c4a3d328c164" containerID="714201c7b206d73e8fd6e2f4d27cba426c1666275e34a169d3f59828f19bba74" exitCode=2 Nov 22 10:50:12 crc kubenswrapper[4926]: I1122 10:50:12.909160 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-c6w2q" event={"ID":"36de2843-6491-4c54-b624-c4a3d328c164","Type":"ContainerDied","Data":"714201c7b206d73e8fd6e2f4d27cba426c1666275e34a169d3f59828f19bba74"} Nov 22 10:50:12 crc kubenswrapper[4926]: I1122 10:50:12.909199 4926 scope.go:117] "RemoveContainer" containerID="954a6a1daad88733da68073d0f235894eb5931360545e07bc25ca2cbedf0efae" Nov 22 10:50:12 crc kubenswrapper[4926]: I1122 10:50:12.910090 4926 scope.go:117] "RemoveContainer" containerID="714201c7b206d73e8fd6e2f4d27cba426c1666275e34a169d3f59828f19bba74" Nov 22 10:50:12 crc kubenswrapper[4926]: E1122 10:50:12.910596 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-multus\" with CrashLoopBackOff: \"back-off 20s restarting failed container=kube-multus pod=multus-c6w2q_openshift-multus(36de2843-6491-4c54-b624-c4a3d328c164)\"" pod="openshift-multus/multus-c6w2q" podUID="36de2843-6491-4c54-b624-c4a3d328c164" Nov 22 10:50:12 crc kubenswrapper[4926]: I1122 10:50:12.914680 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-z69nr_25bc94bb-a5d1-431c-9847-2f6a02997e25/ovnkube-controller/3.log" Nov 22 10:50:12 crc kubenswrapper[4926]: I1122 10:50:12.920225 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-z69nr_25bc94bb-a5d1-431c-9847-2f6a02997e25/ovn-acl-logging/0.log" Nov 22 10:50:12 crc kubenswrapper[4926]: I1122 10:50:12.924950 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-z69nr_25bc94bb-a5d1-431c-9847-2f6a02997e25/ovn-controller/0.log" Nov 22 10:50:12 crc kubenswrapper[4926]: I1122 10:50:12.925512 4926 generic.go:334] "Generic (PLEG): container finished" podID="25bc94bb-a5d1-431c-9847-2f6a02997e25" 
containerID="4271454bde9cb7e8d020b6afb988e02927d482eb62ccfdad1182a4e8e3c46761" exitCode=0 Nov 22 10:50:12 crc kubenswrapper[4926]: I1122 10:50:12.925555 4926 generic.go:334] "Generic (PLEG): container finished" podID="25bc94bb-a5d1-431c-9847-2f6a02997e25" containerID="19bceeb84e2cce918b75507fb0f9d5e4365fcd0d30da4491b83ed54b3c2d369a" exitCode=0 Nov 22 10:50:12 crc kubenswrapper[4926]: I1122 10:50:12.925568 4926 generic.go:334] "Generic (PLEG): container finished" podID="25bc94bb-a5d1-431c-9847-2f6a02997e25" containerID="ecaafcef7f92f85be04341712725da108f6dc00c79d4fb72f683186ccaa7c7d7" exitCode=0 Nov 22 10:50:12 crc kubenswrapper[4926]: I1122 10:50:12.925577 4926 generic.go:334] "Generic (PLEG): container finished" podID="25bc94bb-a5d1-431c-9847-2f6a02997e25" containerID="f2e1c170bdf63c83df5049fbe46ef4de6e08838e961ed3c16904cb9572fdf52f" exitCode=0 Nov 22 10:50:12 crc kubenswrapper[4926]: I1122 10:50:12.925585 4926 generic.go:334] "Generic (PLEG): container finished" podID="25bc94bb-a5d1-431c-9847-2f6a02997e25" containerID="877601ea3a274df873ed3165ae9f9e407093527a55ca10bb9a1c6092312c1b58" exitCode=0 Nov 22 10:50:12 crc kubenswrapper[4926]: I1122 10:50:12.925597 4926 generic.go:334] "Generic (PLEG): container finished" podID="25bc94bb-a5d1-431c-9847-2f6a02997e25" containerID="20393151c13ddc03e23c32092c70e09c99527d0dd7554c02fc9d2ef8f6e38ba4" exitCode=0 Nov 22 10:50:12 crc kubenswrapper[4926]: I1122 10:50:12.925606 4926 generic.go:334] "Generic (PLEG): container finished" podID="25bc94bb-a5d1-431c-9847-2f6a02997e25" containerID="4dd9889bb8600e958757f70ba92e2e7742f9f4e8ce9bf9c11dc4a2fd4ba0aaa2" exitCode=143 Nov 22 10:50:12 crc kubenswrapper[4926]: I1122 10:50:12.925617 4926 generic.go:334] "Generic (PLEG): container finished" podID="25bc94bb-a5d1-431c-9847-2f6a02997e25" containerID="7f647a7091a4d0acbf116e2583851b13ec4415004447202e892f2b24b7acf5e9" exitCode=143 Nov 22 10:50:12 crc kubenswrapper[4926]: I1122 10:50:12.925638 4926 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-z69nr" Nov 22 10:50:12 crc kubenswrapper[4926]: I1122 10:50:12.925642 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-z69nr" event={"ID":"25bc94bb-a5d1-431c-9847-2f6a02997e25","Type":"ContainerDied","Data":"4271454bde9cb7e8d020b6afb988e02927d482eb62ccfdad1182a4e8e3c46761"} Nov 22 10:50:12 crc kubenswrapper[4926]: I1122 10:50:12.925761 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-z69nr" event={"ID":"25bc94bb-a5d1-431c-9847-2f6a02997e25","Type":"ContainerDied","Data":"19bceeb84e2cce918b75507fb0f9d5e4365fcd0d30da4491b83ed54b3c2d369a"} Nov 22 10:50:12 crc kubenswrapper[4926]: I1122 10:50:12.925775 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-z69nr" event={"ID":"25bc94bb-a5d1-431c-9847-2f6a02997e25","Type":"ContainerDied","Data":"ecaafcef7f92f85be04341712725da108f6dc00c79d4fb72f683186ccaa7c7d7"} Nov 22 10:50:12 crc kubenswrapper[4926]: I1122 10:50:12.925784 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-z69nr" event={"ID":"25bc94bb-a5d1-431c-9847-2f6a02997e25","Type":"ContainerDied","Data":"f2e1c170bdf63c83df5049fbe46ef4de6e08838e961ed3c16904cb9572fdf52f"} Nov 22 10:50:12 crc kubenswrapper[4926]: I1122 10:50:12.925793 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-z69nr" event={"ID":"25bc94bb-a5d1-431c-9847-2f6a02997e25","Type":"ContainerDied","Data":"877601ea3a274df873ed3165ae9f9e407093527a55ca10bb9a1c6092312c1b58"} Nov 22 10:50:12 crc kubenswrapper[4926]: I1122 10:50:12.925802 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-z69nr" event={"ID":"25bc94bb-a5d1-431c-9847-2f6a02997e25","Type":"ContainerDied","Data":"20393151c13ddc03e23c32092c70e09c99527d0dd7554c02fc9d2ef8f6e38ba4"} Nov 22 10:50:12 crc kubenswrapper[4926]: I1122 10:50:12.925812 4926 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"4271454bde9cb7e8d020b6afb988e02927d482eb62ccfdad1182a4e8e3c46761"} Nov 22 10:50:12 crc kubenswrapper[4926]: I1122 10:50:12.925821 4926 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"b2e1039227d0efdfdc18a3ec2d9bac78d747c80c3ae8c0a78b3cef27bae4245f"} Nov 22 10:50:12 crc kubenswrapper[4926]: I1122 10:50:12.925827 4926 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"19bceeb84e2cce918b75507fb0f9d5e4365fcd0d30da4491b83ed54b3c2d369a"} Nov 22 10:50:12 crc kubenswrapper[4926]: I1122 10:50:12.925832 4926 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"ecaafcef7f92f85be04341712725da108f6dc00c79d4fb72f683186ccaa7c7d7"} Nov 22 10:50:12 crc kubenswrapper[4926]: I1122 10:50:12.925837 4926 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"f2e1c170bdf63c83df5049fbe46ef4de6e08838e961ed3c16904cb9572fdf52f"} Nov 22 10:50:12 crc kubenswrapper[4926]: I1122 10:50:12.925842 4926 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"877601ea3a274df873ed3165ae9f9e407093527a55ca10bb9a1c6092312c1b58"} Nov 22 10:50:12 crc kubenswrapper[4926]: I1122 10:50:12.925847 4926 
pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"20393151c13ddc03e23c32092c70e09c99527d0dd7554c02fc9d2ef8f6e38ba4"} Nov 22 10:50:12 crc kubenswrapper[4926]: I1122 10:50:12.925852 4926 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"4dd9889bb8600e958757f70ba92e2e7742f9f4e8ce9bf9c11dc4a2fd4ba0aaa2"} Nov 22 10:50:12 crc kubenswrapper[4926]: I1122 10:50:12.925869 4926 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"7f647a7091a4d0acbf116e2583851b13ec4415004447202e892f2b24b7acf5e9"} Nov 22 10:50:12 crc kubenswrapper[4926]: I1122 10:50:12.925875 4926 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"fde011e559f073f261a07a3e55c05d88d1b11a16b6858b74cb8ef79b97c266b1"} Nov 22 10:50:12 crc kubenswrapper[4926]: I1122 10:50:12.925900 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-z69nr" event={"ID":"25bc94bb-a5d1-431c-9847-2f6a02997e25","Type":"ContainerDied","Data":"4dd9889bb8600e958757f70ba92e2e7742f9f4e8ce9bf9c11dc4a2fd4ba0aaa2"} Nov 22 10:50:12 crc kubenswrapper[4926]: I1122 10:50:12.925910 4926 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"4271454bde9cb7e8d020b6afb988e02927d482eb62ccfdad1182a4e8e3c46761"} Nov 22 10:50:12 crc kubenswrapper[4926]: I1122 10:50:12.925916 4926 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"b2e1039227d0efdfdc18a3ec2d9bac78d747c80c3ae8c0a78b3cef27bae4245f"} Nov 22 10:50:12 crc kubenswrapper[4926]: I1122 10:50:12.925921 4926 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"19bceeb84e2cce918b75507fb0f9d5e4365fcd0d30da4491b83ed54b3c2d369a"} Nov 22 10:50:12 crc kubenswrapper[4926]: I1122 10:50:12.925926 4926 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"ecaafcef7f92f85be04341712725da108f6dc00c79d4fb72f683186ccaa7c7d7"} Nov 22 10:50:12 crc kubenswrapper[4926]: I1122 10:50:12.925933 4926 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"f2e1c170bdf63c83df5049fbe46ef4de6e08838e961ed3c16904cb9572fdf52f"} Nov 22 10:50:12 crc kubenswrapper[4926]: I1122 10:50:12.925938 4926 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"877601ea3a274df873ed3165ae9f9e407093527a55ca10bb9a1c6092312c1b58"} Nov 22 10:50:12 crc kubenswrapper[4926]: I1122 10:50:12.925943 4926 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"20393151c13ddc03e23c32092c70e09c99527d0dd7554c02fc9d2ef8f6e38ba4"} Nov 22 10:50:12 crc kubenswrapper[4926]: I1122 10:50:12.925948 4926 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"4dd9889bb8600e958757f70ba92e2e7742f9f4e8ce9bf9c11dc4a2fd4ba0aaa2"} Nov 22 10:50:12 crc kubenswrapper[4926]: I1122 10:50:12.925953 4926 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"7f647a7091a4d0acbf116e2583851b13ec4415004447202e892f2b24b7acf5e9"} Nov 22 10:50:12 crc kubenswrapper[4926]: I1122 10:50:12.925958 4926 
pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"fde011e559f073f261a07a3e55c05d88d1b11a16b6858b74cb8ef79b97c266b1"} Nov 22 10:50:12 crc kubenswrapper[4926]: I1122 10:50:12.925965 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-z69nr" event={"ID":"25bc94bb-a5d1-431c-9847-2f6a02997e25","Type":"ContainerDied","Data":"7f647a7091a4d0acbf116e2583851b13ec4415004447202e892f2b24b7acf5e9"} Nov 22 10:50:12 crc kubenswrapper[4926]: I1122 10:50:12.925974 4926 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"4271454bde9cb7e8d020b6afb988e02927d482eb62ccfdad1182a4e8e3c46761"} Nov 22 10:50:12 crc kubenswrapper[4926]: I1122 10:50:12.925983 4926 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"b2e1039227d0efdfdc18a3ec2d9bac78d747c80c3ae8c0a78b3cef27bae4245f"} Nov 22 10:50:12 crc kubenswrapper[4926]: I1122 10:50:12.925990 4926 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"19bceeb84e2cce918b75507fb0f9d5e4365fcd0d30da4491b83ed54b3c2d369a"} Nov 22 10:50:12 crc kubenswrapper[4926]: I1122 10:50:12.925997 4926 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"ecaafcef7f92f85be04341712725da108f6dc00c79d4fb72f683186ccaa7c7d7"} Nov 22 10:50:12 crc kubenswrapper[4926]: I1122 10:50:12.926003 4926 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"f2e1c170bdf63c83df5049fbe46ef4de6e08838e961ed3c16904cb9572fdf52f"} Nov 22 10:50:12 crc kubenswrapper[4926]: I1122 10:50:12.926009 4926 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"877601ea3a274df873ed3165ae9f9e407093527a55ca10bb9a1c6092312c1b58"} Nov 22 10:50:12 crc kubenswrapper[4926]: I1122 10:50:12.926015 4926 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"20393151c13ddc03e23c32092c70e09c99527d0dd7554c02fc9d2ef8f6e38ba4"} Nov 22 10:50:12 crc kubenswrapper[4926]: I1122 10:50:12.926023 4926 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"4dd9889bb8600e958757f70ba92e2e7742f9f4e8ce9bf9c11dc4a2fd4ba0aaa2"} Nov 22 10:50:12 crc kubenswrapper[4926]: I1122 10:50:12.926029 4926 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"7f647a7091a4d0acbf116e2583851b13ec4415004447202e892f2b24b7acf5e9"} Nov 22 10:50:12 crc kubenswrapper[4926]: I1122 10:50:12.926036 4926 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"fde011e559f073f261a07a3e55c05d88d1b11a16b6858b74cb8ef79b97c266b1"} Nov 22 10:50:12 crc kubenswrapper[4926]: I1122 10:50:12.926046 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-z69nr" event={"ID":"25bc94bb-a5d1-431c-9847-2f6a02997e25","Type":"ContainerDied","Data":"bed6871f225e6e6df17fd36e043a78753caee7423ab367ed98812e6abeb60395"} Nov 22 10:50:12 crc kubenswrapper[4926]: I1122 10:50:12.926056 4926 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"4271454bde9cb7e8d020b6afb988e02927d482eb62ccfdad1182a4e8e3c46761"} 
Nov 22 10:50:12 crc kubenswrapper[4926]: I1122 10:50:12.926065 4926 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"b2e1039227d0efdfdc18a3ec2d9bac78d747c80c3ae8c0a78b3cef27bae4245f"} Nov 22 10:50:12 crc kubenswrapper[4926]: I1122 10:50:12.926072 4926 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"19bceeb84e2cce918b75507fb0f9d5e4365fcd0d30da4491b83ed54b3c2d369a"} Nov 22 10:50:12 crc kubenswrapper[4926]: I1122 10:50:12.926078 4926 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"ecaafcef7f92f85be04341712725da108f6dc00c79d4fb72f683186ccaa7c7d7"} Nov 22 10:50:12 crc kubenswrapper[4926]: I1122 10:50:12.926085 4926 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"f2e1c170bdf63c83df5049fbe46ef4de6e08838e961ed3c16904cb9572fdf52f"} Nov 22 10:50:12 crc kubenswrapper[4926]: I1122 10:50:12.926092 4926 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"877601ea3a274df873ed3165ae9f9e407093527a55ca10bb9a1c6092312c1b58"} Nov 22 10:50:12 crc kubenswrapper[4926]: I1122 10:50:12.926099 4926 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"20393151c13ddc03e23c32092c70e09c99527d0dd7554c02fc9d2ef8f6e38ba4"} Nov 22 10:50:12 crc kubenswrapper[4926]: I1122 10:50:12.926107 4926 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"4dd9889bb8600e958757f70ba92e2e7742f9f4e8ce9bf9c11dc4a2fd4ba0aaa2"} Nov 22 10:50:12 crc kubenswrapper[4926]: I1122 10:50:12.926113 4926 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"7f647a7091a4d0acbf116e2583851b13ec4415004447202e892f2b24b7acf5e9"} Nov 22 10:50:12 crc kubenswrapper[4926]: I1122 10:50:12.926121 4926 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"fde011e559f073f261a07a3e55c05d88d1b11a16b6858b74cb8ef79b97c266b1"} Nov 22 10:50:12 crc kubenswrapper[4926]: I1122 10:50:12.955079 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-z69nr"] Nov 22 10:50:12 crc kubenswrapper[4926]: I1122 10:50:12.975664 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-z69nr"] Nov 22 10:50:12 crc kubenswrapper[4926]: I1122 10:50:12.983224 4926 scope.go:117] "RemoveContainer" containerID="4271454bde9cb7e8d020b6afb988e02927d482eb62ccfdad1182a4e8e3c46761" Nov 22 10:50:13 crc kubenswrapper[4926]: I1122 10:50:13.007907 4926 scope.go:117] "RemoveContainer" containerID="b2e1039227d0efdfdc18a3ec2d9bac78d747c80c3ae8c0a78b3cef27bae4245f" Nov 22 10:50:13 crc kubenswrapper[4926]: I1122 10:50:13.028035 4926 scope.go:117] "RemoveContainer" containerID="19bceeb84e2cce918b75507fb0f9d5e4365fcd0d30da4491b83ed54b3c2d369a" Nov 22 10:50:13 crc kubenswrapper[4926]: I1122 10:50:13.045026 4926 scope.go:117] "RemoveContainer" containerID="ecaafcef7f92f85be04341712725da108f6dc00c79d4fb72f683186ccaa7c7d7" Nov 22 10:50:13 crc kubenswrapper[4926]: I1122 10:50:13.120326 4926 scope.go:117] "RemoveContainer" containerID="f2e1c170bdf63c83df5049fbe46ef4de6e08838e961ed3c16904cb9572fdf52f" Nov 22 10:50:13 crc kubenswrapper[4926]: I1122 10:50:13.133671 4926 
scope.go:117] "RemoveContainer" containerID="877601ea3a274df873ed3165ae9f9e407093527a55ca10bb9a1c6092312c1b58" Nov 22 10:50:13 crc kubenswrapper[4926]: I1122 10:50:13.146313 4926 scope.go:117] "RemoveContainer" containerID="20393151c13ddc03e23c32092c70e09c99527d0dd7554c02fc9d2ef8f6e38ba4" Nov 22 10:50:13 crc kubenswrapper[4926]: I1122 10:50:13.160745 4926 scope.go:117] "RemoveContainer" containerID="4dd9889bb8600e958757f70ba92e2e7742f9f4e8ce9bf9c11dc4a2fd4ba0aaa2" Nov 22 10:50:13 crc kubenswrapper[4926]: I1122 10:50:13.176301 4926 scope.go:117] "RemoveContainer" containerID="7f647a7091a4d0acbf116e2583851b13ec4415004447202e892f2b24b7acf5e9" Nov 22 10:50:13 crc kubenswrapper[4926]: I1122 10:50:13.188043 4926 scope.go:117] "RemoveContainer" containerID="fde011e559f073f261a07a3e55c05d88d1b11a16b6858b74cb8ef79b97c266b1" Nov 22 10:50:13 crc kubenswrapper[4926]: I1122 10:50:13.199459 4926 scope.go:117] "RemoveContainer" containerID="4271454bde9cb7e8d020b6afb988e02927d482eb62ccfdad1182a4e8e3c46761" Nov 22 10:50:13 crc kubenswrapper[4926]: E1122 10:50:13.199747 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4271454bde9cb7e8d020b6afb988e02927d482eb62ccfdad1182a4e8e3c46761\": container with ID starting with 4271454bde9cb7e8d020b6afb988e02927d482eb62ccfdad1182a4e8e3c46761 not found: ID does not exist" containerID="4271454bde9cb7e8d020b6afb988e02927d482eb62ccfdad1182a4e8e3c46761" Nov 22 10:50:13 crc kubenswrapper[4926]: I1122 10:50:13.199821 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4271454bde9cb7e8d020b6afb988e02927d482eb62ccfdad1182a4e8e3c46761"} err="failed to get container status \"4271454bde9cb7e8d020b6afb988e02927d482eb62ccfdad1182a4e8e3c46761\": rpc error: code = NotFound desc = could not find container \"4271454bde9cb7e8d020b6afb988e02927d482eb62ccfdad1182a4e8e3c46761\": container with ID starting with 4271454bde9cb7e8d020b6afb988e02927d482eb62ccfdad1182a4e8e3c46761 not found: ID does not exist" Nov 22 10:50:13 crc kubenswrapper[4926]: I1122 10:50:13.199851 4926 scope.go:117] "RemoveContainer" containerID="b2e1039227d0efdfdc18a3ec2d9bac78d747c80c3ae8c0a78b3cef27bae4245f" Nov 22 10:50:13 crc kubenswrapper[4926]: E1122 10:50:13.200193 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b2e1039227d0efdfdc18a3ec2d9bac78d747c80c3ae8c0a78b3cef27bae4245f\": container with ID starting with b2e1039227d0efdfdc18a3ec2d9bac78d747c80c3ae8c0a78b3cef27bae4245f not found: ID does not exist" containerID="b2e1039227d0efdfdc18a3ec2d9bac78d747c80c3ae8c0a78b3cef27bae4245f" Nov 22 10:50:13 crc kubenswrapper[4926]: I1122 10:50:13.200229 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b2e1039227d0efdfdc18a3ec2d9bac78d747c80c3ae8c0a78b3cef27bae4245f"} err="failed to get container status \"b2e1039227d0efdfdc18a3ec2d9bac78d747c80c3ae8c0a78b3cef27bae4245f\": rpc error: code = NotFound desc = could not find container \"b2e1039227d0efdfdc18a3ec2d9bac78d747c80c3ae8c0a78b3cef27bae4245f\": container with ID starting with b2e1039227d0efdfdc18a3ec2d9bac78d747c80c3ae8c0a78b3cef27bae4245f not found: ID does not exist" Nov 22 10:50:13 crc kubenswrapper[4926]: I1122 10:50:13.200258 4926 scope.go:117] "RemoveContainer" containerID="19bceeb84e2cce918b75507fb0f9d5e4365fcd0d30da4491b83ed54b3c2d369a" Nov 22 10:50:13 crc kubenswrapper[4926]: E1122 10:50:13.200646 4926 
log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"19bceeb84e2cce918b75507fb0f9d5e4365fcd0d30da4491b83ed54b3c2d369a\": container with ID starting with 19bceeb84e2cce918b75507fb0f9d5e4365fcd0d30da4491b83ed54b3c2d369a not found: ID does not exist" containerID="19bceeb84e2cce918b75507fb0f9d5e4365fcd0d30da4491b83ed54b3c2d369a"
Nov 22 10:50:13 crc kubenswrapper[4926]: I1122 10:50:13.200675 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"19bceeb84e2cce918b75507fb0f9d5e4365fcd0d30da4491b83ed54b3c2d369a"} err="failed to get container status \"19bceeb84e2cce918b75507fb0f9d5e4365fcd0d30da4491b83ed54b3c2d369a\": rpc error: code = NotFound desc = could not find container \"19bceeb84e2cce918b75507fb0f9d5e4365fcd0d30da4491b83ed54b3c2d369a\": container with ID starting with 19bceeb84e2cce918b75507fb0f9d5e4365fcd0d30da4491b83ed54b3c2d369a not found: ID does not exist"
Nov 22 10:50:13 crc kubenswrapper[4926]: I1122 10:50:13.200696 4926 scope.go:117] "RemoveContainer" containerID="ecaafcef7f92f85be04341712725da108f6dc00c79d4fb72f683186ccaa7c7d7"
Nov 22 10:50:13 crc kubenswrapper[4926]: E1122 10:50:13.201046 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ecaafcef7f92f85be04341712725da108f6dc00c79d4fb72f683186ccaa7c7d7\": container with ID starting with ecaafcef7f92f85be04341712725da108f6dc00c79d4fb72f683186ccaa7c7d7 not found: ID does not exist" containerID="ecaafcef7f92f85be04341712725da108f6dc00c79d4fb72f683186ccaa7c7d7"
Nov 22 10:50:13 crc kubenswrapper[4926]: I1122 10:50:13.201072 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ecaafcef7f92f85be04341712725da108f6dc00c79d4fb72f683186ccaa7c7d7"} err="failed to get container status \"ecaafcef7f92f85be04341712725da108f6dc00c79d4fb72f683186ccaa7c7d7\": rpc error: code = NotFound desc = could not find container \"ecaafcef7f92f85be04341712725da108f6dc00c79d4fb72f683186ccaa7c7d7\": container with ID starting with ecaafcef7f92f85be04341712725da108f6dc00c79d4fb72f683186ccaa7c7d7 not found: ID does not exist"
Nov 22 10:50:13 crc kubenswrapper[4926]: I1122 10:50:13.201089 4926 scope.go:117] "RemoveContainer" containerID="f2e1c170bdf63c83df5049fbe46ef4de6e08838e961ed3c16904cb9572fdf52f"
Nov 22 10:50:13 crc kubenswrapper[4926]: E1122 10:50:13.201491 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f2e1c170bdf63c83df5049fbe46ef4de6e08838e961ed3c16904cb9572fdf52f\": container with ID starting with f2e1c170bdf63c83df5049fbe46ef4de6e08838e961ed3c16904cb9572fdf52f not found: ID does not exist" containerID="f2e1c170bdf63c83df5049fbe46ef4de6e08838e961ed3c16904cb9572fdf52f"
Nov 22 10:50:13 crc kubenswrapper[4926]: I1122 10:50:13.201512 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f2e1c170bdf63c83df5049fbe46ef4de6e08838e961ed3c16904cb9572fdf52f"} err="failed to get container status \"f2e1c170bdf63c83df5049fbe46ef4de6e08838e961ed3c16904cb9572fdf52f\": rpc error: code = NotFound desc = could not find container \"f2e1c170bdf63c83df5049fbe46ef4de6e08838e961ed3c16904cb9572fdf52f\": container with ID starting with f2e1c170bdf63c83df5049fbe46ef4de6e08838e961ed3c16904cb9572fdf52f not found: ID does not exist"
Nov 22 10:50:13 crc kubenswrapper[4926]: I1122 10:50:13.201528 4926 scope.go:117] "RemoveContainer" containerID="877601ea3a274df873ed3165ae9f9e407093527a55ca10bb9a1c6092312c1b58"
Nov 22 10:50:13 crc kubenswrapper[4926]: E1122 10:50:13.201741 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"877601ea3a274df873ed3165ae9f9e407093527a55ca10bb9a1c6092312c1b58\": container with ID starting with 877601ea3a274df873ed3165ae9f9e407093527a55ca10bb9a1c6092312c1b58 not found: ID does not exist" containerID="877601ea3a274df873ed3165ae9f9e407093527a55ca10bb9a1c6092312c1b58"
Nov 22 10:50:13 crc kubenswrapper[4926]: I1122 10:50:13.201764 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"877601ea3a274df873ed3165ae9f9e407093527a55ca10bb9a1c6092312c1b58"} err="failed to get container status \"877601ea3a274df873ed3165ae9f9e407093527a55ca10bb9a1c6092312c1b58\": rpc error: code = NotFound desc = could not find container \"877601ea3a274df873ed3165ae9f9e407093527a55ca10bb9a1c6092312c1b58\": container with ID starting with 877601ea3a274df873ed3165ae9f9e407093527a55ca10bb9a1c6092312c1b58 not found: ID does not exist"
Nov 22 10:50:13 crc kubenswrapper[4926]: I1122 10:50:13.201778 4926 scope.go:117] "RemoveContainer" containerID="20393151c13ddc03e23c32092c70e09c99527d0dd7554c02fc9d2ef8f6e38ba4"
Nov 22 10:50:13 crc kubenswrapper[4926]: E1122 10:50:13.201963 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"20393151c13ddc03e23c32092c70e09c99527d0dd7554c02fc9d2ef8f6e38ba4\": container with ID starting with 20393151c13ddc03e23c32092c70e09c99527d0dd7554c02fc9d2ef8f6e38ba4 not found: ID does not exist" containerID="20393151c13ddc03e23c32092c70e09c99527d0dd7554c02fc9d2ef8f6e38ba4"
Nov 22 10:50:13 crc kubenswrapper[4926]: I1122 10:50:13.201988 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"20393151c13ddc03e23c32092c70e09c99527d0dd7554c02fc9d2ef8f6e38ba4"} err="failed to get container status \"20393151c13ddc03e23c32092c70e09c99527d0dd7554c02fc9d2ef8f6e38ba4\": rpc error: code = NotFound desc = could not find container \"20393151c13ddc03e23c32092c70e09c99527d0dd7554c02fc9d2ef8f6e38ba4\": container with ID starting with 20393151c13ddc03e23c32092c70e09c99527d0dd7554c02fc9d2ef8f6e38ba4 not found: ID does not exist"
Nov 22 10:50:13 crc kubenswrapper[4926]: I1122 10:50:13.202029 4926 scope.go:117] "RemoveContainer" containerID="4dd9889bb8600e958757f70ba92e2e7742f9f4e8ce9bf9c11dc4a2fd4ba0aaa2"
Nov 22 10:50:13 crc kubenswrapper[4926]: E1122 10:50:13.202200 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4dd9889bb8600e958757f70ba92e2e7742f9f4e8ce9bf9c11dc4a2fd4ba0aaa2\": container with ID starting with 4dd9889bb8600e958757f70ba92e2e7742f9f4e8ce9bf9c11dc4a2fd4ba0aaa2 not found: ID does not exist" containerID="4dd9889bb8600e958757f70ba92e2e7742f9f4e8ce9bf9c11dc4a2fd4ba0aaa2"
Nov 22 10:50:13 crc kubenswrapper[4926]: I1122 10:50:13.202229 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4dd9889bb8600e958757f70ba92e2e7742f9f4e8ce9bf9c11dc4a2fd4ba0aaa2"} err="failed to get container status \"4dd9889bb8600e958757f70ba92e2e7742f9f4e8ce9bf9c11dc4a2fd4ba0aaa2\": rpc error: code = NotFound desc = could not find container \"4dd9889bb8600e958757f70ba92e2e7742f9f4e8ce9bf9c11dc4a2fd4ba0aaa2\": container with ID starting with 4dd9889bb8600e958757f70ba92e2e7742f9f4e8ce9bf9c11dc4a2fd4ba0aaa2 not found: ID does not exist"
Nov 22 10:50:13 crc kubenswrapper[4926]: I1122 10:50:13.202244 4926 scope.go:117] "RemoveContainer" containerID="7f647a7091a4d0acbf116e2583851b13ec4415004447202e892f2b24b7acf5e9"
Nov 22 10:50:13 crc kubenswrapper[4926]: E1122 10:50:13.202602 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7f647a7091a4d0acbf116e2583851b13ec4415004447202e892f2b24b7acf5e9\": container with ID starting with 7f647a7091a4d0acbf116e2583851b13ec4415004447202e892f2b24b7acf5e9 not found: ID does not exist" containerID="7f647a7091a4d0acbf116e2583851b13ec4415004447202e892f2b24b7acf5e9"
Nov 22 10:50:13 crc kubenswrapper[4926]: I1122 10:50:13.202624 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7f647a7091a4d0acbf116e2583851b13ec4415004447202e892f2b24b7acf5e9"} err="failed to get container status \"7f647a7091a4d0acbf116e2583851b13ec4415004447202e892f2b24b7acf5e9\": rpc error: code = NotFound desc = could not find container \"7f647a7091a4d0acbf116e2583851b13ec4415004447202e892f2b24b7acf5e9\": container with ID starting with 7f647a7091a4d0acbf116e2583851b13ec4415004447202e892f2b24b7acf5e9 not found: ID does not exist"
Nov 22 10:50:13 crc kubenswrapper[4926]: I1122 10:50:13.202638 4926 scope.go:117] "RemoveContainer" containerID="fde011e559f073f261a07a3e55c05d88d1b11a16b6858b74cb8ef79b97c266b1"
Nov 22 10:50:13 crc kubenswrapper[4926]: E1122 10:50:13.203103 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"fde011e559f073f261a07a3e55c05d88d1b11a16b6858b74cb8ef79b97c266b1\": container with ID starting with fde011e559f073f261a07a3e55c05d88d1b11a16b6858b74cb8ef79b97c266b1 not found: ID does not exist" containerID="fde011e559f073f261a07a3e55c05d88d1b11a16b6858b74cb8ef79b97c266b1"
Nov 22 10:50:13 crc kubenswrapper[4926]: I1122 10:50:13.203125 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fde011e559f073f261a07a3e55c05d88d1b11a16b6858b74cb8ef79b97c266b1"} err="failed to get container status \"fde011e559f073f261a07a3e55c05d88d1b11a16b6858b74cb8ef79b97c266b1\": rpc error: code = NotFound desc = could not find container \"fde011e559f073f261a07a3e55c05d88d1b11a16b6858b74cb8ef79b97c266b1\": container with ID starting with fde011e559f073f261a07a3e55c05d88d1b11a16b6858b74cb8ef79b97c266b1 not found: ID does not exist"
Nov 22 10:50:13 crc kubenswrapper[4926]: I1122 10:50:13.203140 4926 scope.go:117] "RemoveContainer" containerID="4271454bde9cb7e8d020b6afb988e02927d482eb62ccfdad1182a4e8e3c46761"
Nov 22 10:50:13 crc kubenswrapper[4926]: I1122 10:50:13.203528 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4271454bde9cb7e8d020b6afb988e02927d482eb62ccfdad1182a4e8e3c46761"} err="failed to get container status \"4271454bde9cb7e8d020b6afb988e02927d482eb62ccfdad1182a4e8e3c46761\": rpc error: code = NotFound desc = could not find container \"4271454bde9cb7e8d020b6afb988e02927d482eb62ccfdad1182a4e8e3c46761\": container with ID starting with 4271454bde9cb7e8d020b6afb988e02927d482eb62ccfdad1182a4e8e3c46761 not found: ID does not exist"
Nov 22 10:50:13 crc kubenswrapper[4926]: I1122 10:50:13.203549 4926 scope.go:117] "RemoveContainer" containerID="b2e1039227d0efdfdc18a3ec2d9bac78d747c80c3ae8c0a78b3cef27bae4245f"
Nov 22 10:50:13 crc kubenswrapper[4926]: I1122 10:50:13.203810 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b2e1039227d0efdfdc18a3ec2d9bac78d747c80c3ae8c0a78b3cef27bae4245f"} err="failed to get container status \"b2e1039227d0efdfdc18a3ec2d9bac78d747c80c3ae8c0a78b3cef27bae4245f\": rpc error: code = NotFound desc = could not find container \"b2e1039227d0efdfdc18a3ec2d9bac78d747c80c3ae8c0a78b3cef27bae4245f\": container with ID starting with b2e1039227d0efdfdc18a3ec2d9bac78d747c80c3ae8c0a78b3cef27bae4245f not found: ID does not exist"
Nov 22 10:50:13 crc kubenswrapper[4926]: I1122 10:50:13.203828 4926 scope.go:117] "RemoveContainer" containerID="19bceeb84e2cce918b75507fb0f9d5e4365fcd0d30da4491b83ed54b3c2d369a"
Nov 22 10:50:13 crc kubenswrapper[4926]: I1122 10:50:13.204134 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"19bceeb84e2cce918b75507fb0f9d5e4365fcd0d30da4491b83ed54b3c2d369a"} err="failed to get container status \"19bceeb84e2cce918b75507fb0f9d5e4365fcd0d30da4491b83ed54b3c2d369a\": rpc error: code = NotFound desc = could not find container \"19bceeb84e2cce918b75507fb0f9d5e4365fcd0d30da4491b83ed54b3c2d369a\": container with ID starting with 19bceeb84e2cce918b75507fb0f9d5e4365fcd0d30da4491b83ed54b3c2d369a not found: ID does not exist"
Nov 22 10:50:13 crc kubenswrapper[4926]: I1122 10:50:13.204151 4926 scope.go:117] "RemoveContainer" containerID="ecaafcef7f92f85be04341712725da108f6dc00c79d4fb72f683186ccaa7c7d7"
Nov 22 10:50:13 crc kubenswrapper[4926]: I1122 10:50:13.204361 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ecaafcef7f92f85be04341712725da108f6dc00c79d4fb72f683186ccaa7c7d7"} err="failed to get container status \"ecaafcef7f92f85be04341712725da108f6dc00c79d4fb72f683186ccaa7c7d7\": rpc error: code = NotFound desc = could not find container \"ecaafcef7f92f85be04341712725da108f6dc00c79d4fb72f683186ccaa7c7d7\": container with ID starting with ecaafcef7f92f85be04341712725da108f6dc00c79d4fb72f683186ccaa7c7d7 not found: ID does not exist"
Nov 22 10:50:13 crc kubenswrapper[4926]: I1122 10:50:13.204387 4926 scope.go:117] "RemoveContainer" containerID="f2e1c170bdf63c83df5049fbe46ef4de6e08838e961ed3c16904cb9572fdf52f"
Nov 22 10:50:13 crc kubenswrapper[4926]: I1122 10:50:13.204665 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f2e1c170bdf63c83df5049fbe46ef4de6e08838e961ed3c16904cb9572fdf52f"} err="failed to get container status \"f2e1c170bdf63c83df5049fbe46ef4de6e08838e961ed3c16904cb9572fdf52f\": rpc error: code = NotFound desc = could not find container \"f2e1c170bdf63c83df5049fbe46ef4de6e08838e961ed3c16904cb9572fdf52f\": container with ID starting with f2e1c170bdf63c83df5049fbe46ef4de6e08838e961ed3c16904cb9572fdf52f not found: ID does not exist"
Nov 22 10:50:13 crc kubenswrapper[4926]: I1122 10:50:13.204685 4926 scope.go:117] "RemoveContainer" containerID="877601ea3a274df873ed3165ae9f9e407093527a55ca10bb9a1c6092312c1b58"
Nov 22 10:50:13 crc kubenswrapper[4926]: I1122 10:50:13.205072 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"877601ea3a274df873ed3165ae9f9e407093527a55ca10bb9a1c6092312c1b58"} err="failed to get container status \"877601ea3a274df873ed3165ae9f9e407093527a55ca10bb9a1c6092312c1b58\": rpc error: code = NotFound desc = could not find container \"877601ea3a274df873ed3165ae9f9e407093527a55ca10bb9a1c6092312c1b58\": container with ID starting with 877601ea3a274df873ed3165ae9f9e407093527a55ca10bb9a1c6092312c1b58 not found: ID does not exist"
Nov 22 10:50:13 crc kubenswrapper[4926]: I1122 10:50:13.205098 4926 scope.go:117] "RemoveContainer" containerID="20393151c13ddc03e23c32092c70e09c99527d0dd7554c02fc9d2ef8f6e38ba4"
Nov 22 10:50:13 crc kubenswrapper[4926]: I1122 10:50:13.205327 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"20393151c13ddc03e23c32092c70e09c99527d0dd7554c02fc9d2ef8f6e38ba4"} err="failed to get container status \"20393151c13ddc03e23c32092c70e09c99527d0dd7554c02fc9d2ef8f6e38ba4\": rpc error: code = NotFound desc = could not find container \"20393151c13ddc03e23c32092c70e09c99527d0dd7554c02fc9d2ef8f6e38ba4\": container with ID starting with 20393151c13ddc03e23c32092c70e09c99527d0dd7554c02fc9d2ef8f6e38ba4 not found: ID does not exist"
Nov 22 10:50:13 crc kubenswrapper[4926]: I1122 10:50:13.205353 4926 scope.go:117] "RemoveContainer" containerID="4dd9889bb8600e958757f70ba92e2e7742f9f4e8ce9bf9c11dc4a2fd4ba0aaa2"
Nov 22 10:50:13 crc kubenswrapper[4926]: I1122 10:50:13.205581 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4dd9889bb8600e958757f70ba92e2e7742f9f4e8ce9bf9c11dc4a2fd4ba0aaa2"} err="failed to get container status \"4dd9889bb8600e958757f70ba92e2e7742f9f4e8ce9bf9c11dc4a2fd4ba0aaa2\": rpc error: code = NotFound desc = could not find container \"4dd9889bb8600e958757f70ba92e2e7742f9f4e8ce9bf9c11dc4a2fd4ba0aaa2\": container with ID starting with 4dd9889bb8600e958757f70ba92e2e7742f9f4e8ce9bf9c11dc4a2fd4ba0aaa2 not found: ID does not exist"
Nov 22 10:50:13 crc kubenswrapper[4926]: I1122 10:50:13.205598 4926 scope.go:117] "RemoveContainer" containerID="7f647a7091a4d0acbf116e2583851b13ec4415004447202e892f2b24b7acf5e9"
Nov 22 10:50:13 crc kubenswrapper[4926]: I1122 10:50:13.206018 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7f647a7091a4d0acbf116e2583851b13ec4415004447202e892f2b24b7acf5e9"} err="failed to get container status \"7f647a7091a4d0acbf116e2583851b13ec4415004447202e892f2b24b7acf5e9\": rpc error: code = NotFound desc = could not find container \"7f647a7091a4d0acbf116e2583851b13ec4415004447202e892f2b24b7acf5e9\": container with ID starting with 7f647a7091a4d0acbf116e2583851b13ec4415004447202e892f2b24b7acf5e9 not found: ID does not exist"
Nov 22 10:50:13 crc kubenswrapper[4926]: I1122 10:50:13.206037 4926 scope.go:117] "RemoveContainer" containerID="fde011e559f073f261a07a3e55c05d88d1b11a16b6858b74cb8ef79b97c266b1"
Nov 22 10:50:13 crc kubenswrapper[4926]: I1122 10:50:13.206500 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fde011e559f073f261a07a3e55c05d88d1b11a16b6858b74cb8ef79b97c266b1"} err="failed to get container status \"fde011e559f073f261a07a3e55c05d88d1b11a16b6858b74cb8ef79b97c266b1\": rpc error: code = NotFound desc = could not find container \"fde011e559f073f261a07a3e55c05d88d1b11a16b6858b74cb8ef79b97c266b1\": container with ID starting with fde011e559f073f261a07a3e55c05d88d1b11a16b6858b74cb8ef79b97c266b1 not found: ID does not exist"
Nov 22 10:50:13 crc kubenswrapper[4926]: I1122 10:50:13.206526 4926 scope.go:117] "RemoveContainer" containerID="4271454bde9cb7e8d020b6afb988e02927d482eb62ccfdad1182a4e8e3c46761"
Nov 22 10:50:13 crc kubenswrapper[4926]: I1122 10:50:13.206741 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4271454bde9cb7e8d020b6afb988e02927d482eb62ccfdad1182a4e8e3c46761"} err="failed to get container status \"4271454bde9cb7e8d020b6afb988e02927d482eb62ccfdad1182a4e8e3c46761\": rpc error: code = NotFound desc = could not find container \"4271454bde9cb7e8d020b6afb988e02927d482eb62ccfdad1182a4e8e3c46761\": container with ID starting with 4271454bde9cb7e8d020b6afb988e02927d482eb62ccfdad1182a4e8e3c46761 not found: ID does not exist"
Nov 22 10:50:13 crc kubenswrapper[4926]: I1122 10:50:13.206759 4926 scope.go:117] "RemoveContainer" containerID="b2e1039227d0efdfdc18a3ec2d9bac78d747c80c3ae8c0a78b3cef27bae4245f"
Nov 22 10:50:13 crc kubenswrapper[4926]: I1122 10:50:13.207015 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b2e1039227d0efdfdc18a3ec2d9bac78d747c80c3ae8c0a78b3cef27bae4245f"} err="failed to get container status \"b2e1039227d0efdfdc18a3ec2d9bac78d747c80c3ae8c0a78b3cef27bae4245f\": rpc error: code = NotFound desc = could not find container \"b2e1039227d0efdfdc18a3ec2d9bac78d747c80c3ae8c0a78b3cef27bae4245f\": container with ID starting with b2e1039227d0efdfdc18a3ec2d9bac78d747c80c3ae8c0a78b3cef27bae4245f not found: ID does not exist"
Nov 22 10:50:13 crc kubenswrapper[4926]: I1122 10:50:13.207033 4926 scope.go:117] "RemoveContainer" containerID="19bceeb84e2cce918b75507fb0f9d5e4365fcd0d30da4491b83ed54b3c2d369a"
Nov 22 10:50:13 crc kubenswrapper[4926]: I1122 10:50:13.207682 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"19bceeb84e2cce918b75507fb0f9d5e4365fcd0d30da4491b83ed54b3c2d369a"} err="failed to get container status \"19bceeb84e2cce918b75507fb0f9d5e4365fcd0d30da4491b83ed54b3c2d369a\": rpc error: code = NotFound desc = could not find container \"19bceeb84e2cce918b75507fb0f9d5e4365fcd0d30da4491b83ed54b3c2d369a\": container with ID starting with 19bceeb84e2cce918b75507fb0f9d5e4365fcd0d30da4491b83ed54b3c2d369a not found: ID does not exist"
Nov 22 10:50:13 crc kubenswrapper[4926]: I1122 10:50:13.207703 4926 scope.go:117] "RemoveContainer" containerID="ecaafcef7f92f85be04341712725da108f6dc00c79d4fb72f683186ccaa7c7d7"
Nov 22 10:50:13 crc kubenswrapper[4926]: I1122 10:50:13.207978 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ecaafcef7f92f85be04341712725da108f6dc00c79d4fb72f683186ccaa7c7d7"} err="failed to get container status \"ecaafcef7f92f85be04341712725da108f6dc00c79d4fb72f683186ccaa7c7d7\": rpc error: code = NotFound desc = could not find container \"ecaafcef7f92f85be04341712725da108f6dc00c79d4fb72f683186ccaa7c7d7\": container with ID starting with ecaafcef7f92f85be04341712725da108f6dc00c79d4fb72f683186ccaa7c7d7 not found: ID does not exist"
Nov 22 10:50:13 crc kubenswrapper[4926]: I1122 10:50:13.207996 4926 scope.go:117] "RemoveContainer" containerID="f2e1c170bdf63c83df5049fbe46ef4de6e08838e961ed3c16904cb9572fdf52f"
Nov 22 10:50:13 crc kubenswrapper[4926]: I1122 10:50:13.208368 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f2e1c170bdf63c83df5049fbe46ef4de6e08838e961ed3c16904cb9572fdf52f"} err="failed to get container status \"f2e1c170bdf63c83df5049fbe46ef4de6e08838e961ed3c16904cb9572fdf52f\": rpc error: code = NotFound desc = could not find container \"f2e1c170bdf63c83df5049fbe46ef4de6e08838e961ed3c16904cb9572fdf52f\": container with ID starting with f2e1c170bdf63c83df5049fbe46ef4de6e08838e961ed3c16904cb9572fdf52f not found: ID does not exist"
Nov 22 10:50:13 crc kubenswrapper[4926]: I1122 10:50:13.208389 4926 scope.go:117] "RemoveContainer" containerID="877601ea3a274df873ed3165ae9f9e407093527a55ca10bb9a1c6092312c1b58"
Nov 22 10:50:13 crc kubenswrapper[4926]: I1122 10:50:13.208631 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"877601ea3a274df873ed3165ae9f9e407093527a55ca10bb9a1c6092312c1b58"} err="failed to get container status \"877601ea3a274df873ed3165ae9f9e407093527a55ca10bb9a1c6092312c1b58\": rpc error: code = NotFound desc = could not find container \"877601ea3a274df873ed3165ae9f9e407093527a55ca10bb9a1c6092312c1b58\": container with ID starting with 877601ea3a274df873ed3165ae9f9e407093527a55ca10bb9a1c6092312c1b58 not found: ID does not exist"
Nov 22 10:50:13 crc kubenswrapper[4926]: I1122 10:50:13.208670 4926 scope.go:117] "RemoveContainer" containerID="20393151c13ddc03e23c32092c70e09c99527d0dd7554c02fc9d2ef8f6e38ba4"
Nov 22 10:50:13 crc kubenswrapper[4926]: I1122 10:50:13.208940 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"20393151c13ddc03e23c32092c70e09c99527d0dd7554c02fc9d2ef8f6e38ba4"} err="failed to get container status \"20393151c13ddc03e23c32092c70e09c99527d0dd7554c02fc9d2ef8f6e38ba4\": rpc error: code = NotFound desc = could not find container \"20393151c13ddc03e23c32092c70e09c99527d0dd7554c02fc9d2ef8f6e38ba4\": container with ID starting with 20393151c13ddc03e23c32092c70e09c99527d0dd7554c02fc9d2ef8f6e38ba4 not found: ID does not exist"
Nov 22 10:50:13 crc kubenswrapper[4926]: I1122 10:50:13.208962 4926 scope.go:117] "RemoveContainer" containerID="4dd9889bb8600e958757f70ba92e2e7742f9f4e8ce9bf9c11dc4a2fd4ba0aaa2"
Nov 22 10:50:13 crc kubenswrapper[4926]: I1122 10:50:13.209194 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4dd9889bb8600e958757f70ba92e2e7742f9f4e8ce9bf9c11dc4a2fd4ba0aaa2"} err="failed to get container status \"4dd9889bb8600e958757f70ba92e2e7742f9f4e8ce9bf9c11dc4a2fd4ba0aaa2\": rpc error: code = NotFound desc = could not find container \"4dd9889bb8600e958757f70ba92e2e7742f9f4e8ce9bf9c11dc4a2fd4ba0aaa2\": container with ID starting with 4dd9889bb8600e958757f70ba92e2e7742f9f4e8ce9bf9c11dc4a2fd4ba0aaa2 not found: ID does not exist"
Nov 22 10:50:13 crc kubenswrapper[4926]: I1122 10:50:13.209213 4926 scope.go:117] "RemoveContainer" containerID="7f647a7091a4d0acbf116e2583851b13ec4415004447202e892f2b24b7acf5e9"
Nov 22 10:50:13 crc kubenswrapper[4926]: I1122 10:50:13.209527 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7f647a7091a4d0acbf116e2583851b13ec4415004447202e892f2b24b7acf5e9"} err="failed to get container status \"7f647a7091a4d0acbf116e2583851b13ec4415004447202e892f2b24b7acf5e9\": rpc error: code = NotFound desc = could not find container \"7f647a7091a4d0acbf116e2583851b13ec4415004447202e892f2b24b7acf5e9\": container with ID starting with 7f647a7091a4d0acbf116e2583851b13ec4415004447202e892f2b24b7acf5e9 not found: ID does not exist"
Nov 22 10:50:13 crc kubenswrapper[4926]: I1122 10:50:13.209553 4926 scope.go:117] "RemoveContainer" containerID="fde011e559f073f261a07a3e55c05d88d1b11a16b6858b74cb8ef79b97c266b1"
Nov 22 10:50:13 crc kubenswrapper[4926]: I1122 10:50:13.209776 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fde011e559f073f261a07a3e55c05d88d1b11a16b6858b74cb8ef79b97c266b1"} err="failed to get container status \"fde011e559f073f261a07a3e55c05d88d1b11a16b6858b74cb8ef79b97c266b1\": rpc error: code = NotFound desc = could not find container \"fde011e559f073f261a07a3e55c05d88d1b11a16b6858b74cb8ef79b97c266b1\": container with ID starting with fde011e559f073f261a07a3e55c05d88d1b11a16b6858b74cb8ef79b97c266b1 not found: ID does not exist"
Nov 22 10:50:13 crc kubenswrapper[4926]: I1122 10:50:13.209793 4926 scope.go:117] "RemoveContainer" containerID="4271454bde9cb7e8d020b6afb988e02927d482eb62ccfdad1182a4e8e3c46761"
Nov 22 10:50:13 crc kubenswrapper[4926]: I1122 10:50:13.210170 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4271454bde9cb7e8d020b6afb988e02927d482eb62ccfdad1182a4e8e3c46761"} err="failed to get container status \"4271454bde9cb7e8d020b6afb988e02927d482eb62ccfdad1182a4e8e3c46761\": rpc error: code = NotFound desc = could not find container \"4271454bde9cb7e8d020b6afb988e02927d482eb62ccfdad1182a4e8e3c46761\": container with ID starting with 4271454bde9cb7e8d020b6afb988e02927d482eb62ccfdad1182a4e8e3c46761 not found: ID does not exist"
Nov 22 10:50:13 crc kubenswrapper[4926]: I1122 10:50:13.210188 4926 scope.go:117] "RemoveContainer" containerID="b2e1039227d0efdfdc18a3ec2d9bac78d747c80c3ae8c0a78b3cef27bae4245f"
Nov 22 10:50:13 crc kubenswrapper[4926]: I1122 10:50:13.210393 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b2e1039227d0efdfdc18a3ec2d9bac78d747c80c3ae8c0a78b3cef27bae4245f"} err="failed to get container status \"b2e1039227d0efdfdc18a3ec2d9bac78d747c80c3ae8c0a78b3cef27bae4245f\": rpc error: code = NotFound desc = could not find container \"b2e1039227d0efdfdc18a3ec2d9bac78d747c80c3ae8c0a78b3cef27bae4245f\": container with ID starting with b2e1039227d0efdfdc18a3ec2d9bac78d747c80c3ae8c0a78b3cef27bae4245f not found: ID does not exist"
Nov 22 10:50:13 crc kubenswrapper[4926]: I1122 10:50:13.210413 4926 scope.go:117] "RemoveContainer" containerID="19bceeb84e2cce918b75507fb0f9d5e4365fcd0d30da4491b83ed54b3c2d369a"
Nov 22 10:50:13 crc kubenswrapper[4926]: I1122 10:50:13.210628 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"19bceeb84e2cce918b75507fb0f9d5e4365fcd0d30da4491b83ed54b3c2d369a"} err="failed to get container status \"19bceeb84e2cce918b75507fb0f9d5e4365fcd0d30da4491b83ed54b3c2d369a\": rpc error: code = NotFound desc = could not find container \"19bceeb84e2cce918b75507fb0f9d5e4365fcd0d30da4491b83ed54b3c2d369a\": container with ID starting with 19bceeb84e2cce918b75507fb0f9d5e4365fcd0d30da4491b83ed54b3c2d369a not found: ID does not exist"
Nov 22 10:50:13 crc kubenswrapper[4926]: I1122 10:50:13.210645 4926 scope.go:117] "RemoveContainer" containerID="ecaafcef7f92f85be04341712725da108f6dc00c79d4fb72f683186ccaa7c7d7"
Nov 22 10:50:13 crc kubenswrapper[4926]: I1122 10:50:13.210831 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ecaafcef7f92f85be04341712725da108f6dc00c79d4fb72f683186ccaa7c7d7"} err="failed to get container status \"ecaafcef7f92f85be04341712725da108f6dc00c79d4fb72f683186ccaa7c7d7\": rpc error: code = NotFound desc = could not find container \"ecaafcef7f92f85be04341712725da108f6dc00c79d4fb72f683186ccaa7c7d7\": container with ID starting with ecaafcef7f92f85be04341712725da108f6dc00c79d4fb72f683186ccaa7c7d7 not found: ID does not exist"
Nov 22 10:50:13 crc kubenswrapper[4926]: I1122 10:50:13.210848 4926 scope.go:117] "RemoveContainer" containerID="f2e1c170bdf63c83df5049fbe46ef4de6e08838e961ed3c16904cb9572fdf52f"
Nov 22 10:50:13 crc kubenswrapper[4926]: I1122 10:50:13.211081 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f2e1c170bdf63c83df5049fbe46ef4de6e08838e961ed3c16904cb9572fdf52f"} err="failed to get container status \"f2e1c170bdf63c83df5049fbe46ef4de6e08838e961ed3c16904cb9572fdf52f\": rpc error: code = NotFound desc = could not find container \"f2e1c170bdf63c83df5049fbe46ef4de6e08838e961ed3c16904cb9572fdf52f\": container with ID starting with f2e1c170bdf63c83df5049fbe46ef4de6e08838e961ed3c16904cb9572fdf52f not found: ID does not exist"
Nov 22 10:50:13 crc kubenswrapper[4926]: I1122 10:50:13.211111 4926 scope.go:117] "RemoveContainer" containerID="877601ea3a274df873ed3165ae9f9e407093527a55ca10bb9a1c6092312c1b58"
Nov 22 10:50:13 crc kubenswrapper[4926]: I1122 10:50:13.211397 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"877601ea3a274df873ed3165ae9f9e407093527a55ca10bb9a1c6092312c1b58"} err="failed to get container status \"877601ea3a274df873ed3165ae9f9e407093527a55ca10bb9a1c6092312c1b58\": rpc error: code = NotFound desc = could not find container \"877601ea3a274df873ed3165ae9f9e407093527a55ca10bb9a1c6092312c1b58\": container with ID starting with 877601ea3a274df873ed3165ae9f9e407093527a55ca10bb9a1c6092312c1b58 not found: ID does not exist"
Nov 22 10:50:13 crc kubenswrapper[4926]: I1122 10:50:13.211415 4926 scope.go:117] "RemoveContainer" containerID="20393151c13ddc03e23c32092c70e09c99527d0dd7554c02fc9d2ef8f6e38ba4"
Nov 22 10:50:13 crc kubenswrapper[4926]: I1122 10:50:13.211634 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"20393151c13ddc03e23c32092c70e09c99527d0dd7554c02fc9d2ef8f6e38ba4"} err="failed to get container status \"20393151c13ddc03e23c32092c70e09c99527d0dd7554c02fc9d2ef8f6e38ba4\": rpc error: code = NotFound desc = could not find container \"20393151c13ddc03e23c32092c70e09c99527d0dd7554c02fc9d2ef8f6e38ba4\": container with ID starting with 20393151c13ddc03e23c32092c70e09c99527d0dd7554c02fc9d2ef8f6e38ba4 not found: ID does not exist"
Nov 22 10:50:13 crc kubenswrapper[4926]: I1122 10:50:13.211654 4926 scope.go:117] "RemoveContainer" containerID="4dd9889bb8600e958757f70ba92e2e7742f9f4e8ce9bf9c11dc4a2fd4ba0aaa2"
Nov 22 10:50:13 crc kubenswrapper[4926]: I1122 10:50:13.211833 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4dd9889bb8600e958757f70ba92e2e7742f9f4e8ce9bf9c11dc4a2fd4ba0aaa2"} err="failed to get container status \"4dd9889bb8600e958757f70ba92e2e7742f9f4e8ce9bf9c11dc4a2fd4ba0aaa2\": rpc error: code = NotFound desc = could not find container \"4dd9889bb8600e958757f70ba92e2e7742f9f4e8ce9bf9c11dc4a2fd4ba0aaa2\": container with ID starting with 4dd9889bb8600e958757f70ba92e2e7742f9f4e8ce9bf9c11dc4a2fd4ba0aaa2 not found: ID does not exist"
Nov 22 10:50:13 crc kubenswrapper[4926]: I1122 10:50:13.211850 4926 scope.go:117] "RemoveContainer" containerID="7f647a7091a4d0acbf116e2583851b13ec4415004447202e892f2b24b7acf5e9"
Nov 22 10:50:13 crc kubenswrapper[4926]: I1122 10:50:13.212179 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7f647a7091a4d0acbf116e2583851b13ec4415004447202e892f2b24b7acf5e9"} err="failed to get container status \"7f647a7091a4d0acbf116e2583851b13ec4415004447202e892f2b24b7acf5e9\": rpc error: code = NotFound desc = could not find container \"7f647a7091a4d0acbf116e2583851b13ec4415004447202e892f2b24b7acf5e9\": container with ID starting with 7f647a7091a4d0acbf116e2583851b13ec4415004447202e892f2b24b7acf5e9 not found: ID does not exist"
Nov 22 10:50:13 crc kubenswrapper[4926]: I1122 10:50:13.212198 4926 scope.go:117] "RemoveContainer" containerID="fde011e559f073f261a07a3e55c05d88d1b11a16b6858b74cb8ef79b97c266b1"
Nov 22 10:50:13 crc kubenswrapper[4926]: I1122 10:50:13.212434 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fde011e559f073f261a07a3e55c05d88d1b11a16b6858b74cb8ef79b97c266b1"} err="failed to get container status \"fde011e559f073f261a07a3e55c05d88d1b11a16b6858b74cb8ef79b97c266b1\": rpc error: code = NotFound desc = could not find container \"fde011e559f073f261a07a3e55c05d88d1b11a16b6858b74cb8ef79b97c266b1\": container with ID starting with fde011e559f073f261a07a3e55c05d88d1b11a16b6858b74cb8ef79b97c266b1 not found: ID does not exist"
Nov 22 10:50:13 crc kubenswrapper[4926]: I1122 10:50:13.936861 4926 generic.go:334] "Generic (PLEG): container finished" podID="b85a9da2-2c6e-4b71-870e-5d64d6832f37" containerID="805e49f90564bcb87c4c31126a8ff3fb8407888455b6d14a5498c74ae40f63c3" exitCode=0
Nov 22 10:50:13 crc kubenswrapper[4926]: I1122 10:50:13.936945 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-rdpq5" event={"ID":"b85a9da2-2c6e-4b71-870e-5d64d6832f37","Type":"ContainerDied","Data":"805e49f90564bcb87c4c31126a8ff3fb8407888455b6d14a5498c74ae40f63c3"}
Nov 22 10:50:13 crc kubenswrapper[4926]: I1122 10:50:13.940847 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-c6w2q_36de2843-6491-4c54-b624-c4a3d328c164/kube-multus/2.log"
Nov 22 10:50:14 crc kubenswrapper[4926]: I1122 10:50:14.589423 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="25bc94bb-a5d1-431c-9847-2f6a02997e25" path="/var/lib/kubelet/pods/25bc94bb-a5d1-431c-9847-2f6a02997e25/volumes"
Nov 22 10:50:14 crc kubenswrapper[4926]: I1122 10:50:14.957066 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-rdpq5" event={"ID":"b85a9da2-2c6e-4b71-870e-5d64d6832f37","Type":"ContainerStarted","Data":"55a44b777871b36886b78c0787f97f485f3459f5acf92c76e88059d513561f24"}
Nov 22 10:50:14 crc kubenswrapper[4926]: I1122 10:50:14.957483 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-rdpq5" event={"ID":"b85a9da2-2c6e-4b71-870e-5d64d6832f37","Type":"ContainerStarted","Data":"203cec3cf936fe08516cc5584995d96f1aeedb11e1be7eac656deb13875a26df"}
Nov 22 10:50:14 crc kubenswrapper[4926]: I1122 10:50:14.957506 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-rdpq5" event={"ID":"b85a9da2-2c6e-4b71-870e-5d64d6832f37","Type":"ContainerStarted","Data":"9b3fcae9f22eda468383b8dce0e5dea6e8d17b1f21f2318fcd4c352f8ebb7075"}
Nov 22 10:50:14 crc kubenswrapper[4926]: I1122 10:50:14.957526 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-rdpq5" event={"ID":"b85a9da2-2c6e-4b71-870e-5d64d6832f37","Type":"ContainerStarted","Data":"c6f4d9f871592d628f809942af19af46b7853e27989b501db20fd681eb6c6a29"}
Nov 22 10:50:14 crc kubenswrapper[4926]: I1122 10:50:14.957544 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-rdpq5" event={"ID":"b85a9da2-2c6e-4b71-870e-5d64d6832f37","Type":"ContainerStarted","Data":"abd44bb1864b6b1f64e0a361b2caae4246e08bf126cd63da701bf24603581ec9"}
Nov 22 10:50:14 crc kubenswrapper[4926]: I1122 10:50:14.957561 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-rdpq5" event={"ID":"b85a9da2-2c6e-4b71-870e-5d64d6832f37","Type":"ContainerStarted","Data":"0a5fa9f0d7da3ad31ebcc0fb4b5bc69db5c1d52f5da2f144fb6dcac702c1d0e0"}
Nov 22 10:50:17 crc kubenswrapper[4926]: I1122 10:50:17.981814 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-rdpq5" event={"ID":"b85a9da2-2c6e-4b71-870e-5d64d6832f37","Type":"ContainerStarted","Data":"37b702aa43968d3a16a43e94a5a22156c2e50aebb2f6fe06ba6ba52988bf4a5f"}
Nov 22 10:50:19 crc kubenswrapper[4926]: I1122 10:50:19.999136 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-rdpq5" event={"ID":"b85a9da2-2c6e-4b71-870e-5d64d6832f37","Type":"ContainerStarted","Data":"355e1a65b546fb371310c6e3c74d95e0cd5f5705b37522bd72b9e62e62c70114"}
Nov 22 10:50:19 crc kubenswrapper[4926]: I1122 10:50:19.999648 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-rdpq5"
Nov 22 10:50:19 crc kubenswrapper[4926]: I1122 10:50:19.999662 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-rdpq5"
Nov 22 10:50:20 crc kubenswrapper[4926]: I1122 10:50:20.032766 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-rdpq5"
Nov 22 10:50:20 crc kubenswrapper[4926]: I1122 10:50:20.032801 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ovn-kubernetes/ovnkube-node-rdpq5" podStartSLOduration=8.03278459 podStartE2EDuration="8.03278459s" podCreationTimestamp="2025-11-22 10:50:12 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 10:50:20.030818104 +0000 UTC m=+640.332423431" watchObservedRunningTime="2025-11-22 10:50:20.03278459 +0000 UTC m=+640.334389877"
Nov 22 10:50:21 crc kubenswrapper[4926]: I1122 10:50:21.003269 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-rdpq5"
Nov 22 10:50:21 crc kubenswrapper[4926]: I1122 10:50:21.035932 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-rdpq5"
Nov 22 10:50:28 crc kubenswrapper[4926]: I1122 10:50:28.581306 4926 scope.go:117] "RemoveContainer" containerID="714201c7b206d73e8fd6e2f4d27cba426c1666275e34a169d3f59828f19bba74"
Nov 22 10:50:28 crc kubenswrapper[4926]: E1122 10:50:28.581951 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-multus\" with CrashLoopBackOff: \"back-off 20s restarting failed container=kube-multus pod=multus-c6w2q_openshift-multus(36de2843-6491-4c54-b624-c4a3d328c164)\"" pod="openshift-multus/multus-c6w2q" podUID="36de2843-6491-4c54-b624-c4a3d328c164"
Nov 22 10:50:42 crc kubenswrapper[4926]: I1122 10:50:42.886826 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-rdpq5"
Nov 22 10:50:43 crc kubenswrapper[4926]: I1122 10:50:43.582577 4926 scope.go:117] "RemoveContainer" containerID="714201c7b206d73e8fd6e2f4d27cba426c1666275e34a169d3f59828f19bba74"
Nov 22 10:50:44 crc kubenswrapper[4926]: I1122 10:50:44.160040 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-c6w2q_36de2843-6491-4c54-b624-c4a3d328c164/kube-multus/2.log"
Nov 22 10:50:44 crc kubenswrapper[4926]: I1122 10:50:44.160409 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-c6w2q" event={"ID":"36de2843-6491-4c54-b624-c4a3d328c164","Type":"ContainerStarted","Data":"3d41c93e9a2edcba8e9ac672b0b1ed615df57f6ed3237cda7bc89f43689d97f6"}
Nov 22 10:50:50 crc kubenswrapper[4926]: I1122 10:50:50.971353 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772enqqjf"]
Nov 22 10:50:50 crc kubenswrapper[4926]: I1122 10:50:50.972756 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772enqqjf"
Nov 22 10:50:50 crc kubenswrapper[4926]: I1122 10:50:50.979947 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"default-dockercfg-vmwhc"
Nov 22 10:50:50 crc kubenswrapper[4926]: I1122 10:50:50.986834 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772enqqjf"]
Nov 22 10:50:51 crc kubenswrapper[4926]: I1122 10:50:51.119390 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mljp2\" (UniqueName: \"kubernetes.io/projected/a5213e13-fc34-444f-91d3-df6d09816a68-kube-api-access-mljp2\") pod \"5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772enqqjf\" (UID: \"a5213e13-fc34-444f-91d3-df6d09816a68\") " pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772enqqjf"
Nov 22 10:50:51 crc kubenswrapper[4926]: I1122 10:50:51.119457 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/a5213e13-fc34-444f-91d3-df6d09816a68-util\") pod \"5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772enqqjf\" (UID: \"a5213e13-fc34-444f-91d3-df6d09816a68\") " pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772enqqjf"
Nov 22 10:50:51 crc kubenswrapper[4926]: I1122 10:50:51.119774 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/a5213e13-fc34-444f-91d3-df6d09816a68-bundle\") pod \"5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772enqqjf\" (UID: \"a5213e13-fc34-444f-91d3-df6d09816a68\") " pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772enqqjf"
Nov 22 10:50:51 crc kubenswrapper[4926]: I1122 10:50:51.221427 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/a5213e13-fc34-444f-91d3-df6d09816a68-bundle\") pod \"5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772enqqjf\" (UID: \"a5213e13-fc34-444f-91d3-df6d09816a68\") " pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772enqqjf"
Nov 22 10:50:51 crc kubenswrapper[4926]: I1122 10:50:51.221514 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mljp2\" (UniqueName: \"kubernetes.io/projected/a5213e13-fc34-444f-91d3-df6d09816a68-kube-api-access-mljp2\") pod \"5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772enqqjf\" (UID: \"a5213e13-fc34-444f-91d3-df6d09816a68\") " pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772enqqjf"
Nov 22 10:50:51 crc kubenswrapper[4926]: I1122 10:50:51.221540 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/a5213e13-fc34-444f-91d3-df6d09816a68-util\") pod \"5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772enqqjf\" (UID: \"a5213e13-fc34-444f-91d3-df6d09816a68\") " pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772enqqjf"
Nov 22 10:50:51 crc kubenswrapper[4926]: I1122 10:50:51.222049 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/a5213e13-fc34-444f-91d3-df6d09816a68-util\") pod \"5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772enqqjf\" (UID: \"a5213e13-fc34-444f-91d3-df6d09816a68\") " pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772enqqjf"
Nov 22 10:50:51 crc kubenswrapper[4926]: I1122 10:50:51.222057 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/a5213e13-fc34-444f-91d3-df6d09816a68-bundle\") pod \"5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772enqqjf\" (UID: \"a5213e13-fc34-444f-91d3-df6d09816a68\") " pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772enqqjf"
Nov 22 10:50:51 crc kubenswrapper[4926]: I1122 10:50:51.251237 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mljp2\" (UniqueName: \"kubernetes.io/projected/a5213e13-fc34-444f-91d3-df6d09816a68-kube-api-access-mljp2\") pod \"5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772enqqjf\" (UID: \"a5213e13-fc34-444f-91d3-df6d09816a68\") " pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772enqqjf"
Nov 22 10:50:51 crc kubenswrapper[4926]: I1122 10:50:51.290111 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772enqqjf"
Nov 22 10:50:51 crc kubenswrapper[4926]: I1122 10:50:51.535671 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772enqqjf"]
Nov 22 10:50:51 crc kubenswrapper[4926]: W1122 10:50:51.544962 4926 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poda5213e13_fc34_444f_91d3_df6d09816a68.slice/crio-16b70799e2fc6cbc5e96b4e3e650666f0a8d4ba090695651e330a817af1427e5 WatchSource:0}: Error finding container 16b70799e2fc6cbc5e96b4e3e650666f0a8d4ba090695651e330a817af1427e5: Status 404 returned error can't find the container with id 16b70799e2fc6cbc5e96b4e3e650666f0a8d4ba090695651e330a817af1427e5
Nov 22 10:50:52 crc kubenswrapper[4926]: I1122 10:50:52.216438 4926 generic.go:334] "Generic (PLEG): container finished" podID="a5213e13-fc34-444f-91d3-df6d09816a68" containerID="74b904b81ef880819053210738013d284fe81e565b0d3df1c71eb24334634d33" exitCode=0
Nov 22 10:50:52 crc kubenswrapper[4926]: I1122 10:50:52.216527 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772enqqjf" event={"ID":"a5213e13-fc34-444f-91d3-df6d09816a68","Type":"ContainerDied","Data":"74b904b81ef880819053210738013d284fe81e565b0d3df1c71eb24334634d33"}
Nov 22 10:50:52 crc kubenswrapper[4926]: I1122 10:50:52.216925 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772enqqjf" event={"ID":"a5213e13-fc34-444f-91d3-df6d09816a68","Type":"ContainerStarted","Data":"16b70799e2fc6cbc5e96b4e3e650666f0a8d4ba090695651e330a817af1427e5"}
Nov 22 10:50:53 crc kubenswrapper[4926]: I1122 10:50:53.222803 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772enqqjf" event={"ID":"a5213e13-fc34-444f-91d3-df6d09816a68","Type":"ContainerStarted","Data":"aa50e518fdf185c906590968b7c06b6c4c3465f41c6432e2634db91daa413d53"}
Nov 22 10:50:54 crc kubenswrapper[4926]: I1122 10:50:54.231697 4926 generic.go:334] "Generic (PLEG): container finished" podID="a5213e13-fc34-444f-91d3-df6d09816a68" containerID="aa50e518fdf185c906590968b7c06b6c4c3465f41c6432e2634db91daa413d53" exitCode=0
Nov 22 10:50:54 crc kubenswrapper[4926]: I1122 10:50:54.231755 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772enqqjf" event={"ID":"a5213e13-fc34-444f-91d3-df6d09816a68","Type":"ContainerDied","Data":"aa50e518fdf185c906590968b7c06b6c4c3465f41c6432e2634db91daa413d53"}
Nov 22 10:50:55 crc kubenswrapper[4926]: I1122 10:50:55.245585 4926 generic.go:334] "Generic (PLEG): container finished" podID="a5213e13-fc34-444f-91d3-df6d09816a68" containerID="b2462244b708b3341aeb3a2a164b60758cf1c510b9707d8ef8210ac8277d09e4" exitCode=0
Nov 22 10:50:55 crc kubenswrapper[4926]: I1122 10:50:55.245646 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772enqqjf" event={"ID":"a5213e13-fc34-444f-91d3-df6d09816a68","Type":"ContainerDied","Data":"b2462244b708b3341aeb3a2a164b60758cf1c510b9707d8ef8210ac8277d09e4"}
Nov 22 10:50:56 crc kubenswrapper[4926]: I1122 10:50:56.550538 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772enqqjf"
Nov 22 10:50:56 crc kubenswrapper[4926]: I1122 10:50:56.692377 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/a5213e13-fc34-444f-91d3-df6d09816a68-util\") pod \"a5213e13-fc34-444f-91d3-df6d09816a68\" (UID: \"a5213e13-fc34-444f-91d3-df6d09816a68\") "
Nov 22 10:50:56 crc kubenswrapper[4926]: I1122 10:50:56.692441 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mljp2\" (UniqueName: \"kubernetes.io/projected/a5213e13-fc34-444f-91d3-df6d09816a68-kube-api-access-mljp2\") pod \"a5213e13-fc34-444f-91d3-df6d09816a68\" (UID: \"a5213e13-fc34-444f-91d3-df6d09816a68\") "
Nov 22 10:50:56 crc kubenswrapper[4926]: I1122 10:50:56.692471 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/a5213e13-fc34-444f-91d3-df6d09816a68-bundle\") pod \"a5213e13-fc34-444f-91d3-df6d09816a68\" (UID: \"a5213e13-fc34-444f-91d3-df6d09816a68\") "
Nov 22 10:50:56 crc kubenswrapper[4926]: I1122 10:50:56.693595 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a5213e13-fc34-444f-91d3-df6d09816a68-bundle" (OuterVolumeSpecName: "bundle") pod "a5213e13-fc34-444f-91d3-df6d09816a68" (UID: "a5213e13-fc34-444f-91d3-df6d09816a68"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 22 10:50:56 crc kubenswrapper[4926]: I1122 10:50:56.702179 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a5213e13-fc34-444f-91d3-df6d09816a68-kube-api-access-mljp2" (OuterVolumeSpecName: "kube-api-access-mljp2") pod "a5213e13-fc34-444f-91d3-df6d09816a68" (UID: "a5213e13-fc34-444f-91d3-df6d09816a68"). InnerVolumeSpecName "kube-api-access-mljp2". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 22 10:50:56 crc kubenswrapper[4926]: I1122 10:50:56.705744 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a5213e13-fc34-444f-91d3-df6d09816a68-util" (OuterVolumeSpecName: "util") pod "a5213e13-fc34-444f-91d3-df6d09816a68" (UID: "a5213e13-fc34-444f-91d3-df6d09816a68"). InnerVolumeSpecName "util". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 22 10:50:56 crc kubenswrapper[4926]: I1122 10:50:56.794859 4926 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/a5213e13-fc34-444f-91d3-df6d09816a68-bundle\") on node \"crc\" DevicePath \"\""
Nov 22 10:50:56 crc kubenswrapper[4926]: I1122 10:50:56.795002 4926 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/a5213e13-fc34-444f-91d3-df6d09816a68-util\") on node \"crc\" DevicePath \"\""
Nov 22 10:50:56 crc kubenswrapper[4926]: I1122 10:50:56.795023 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mljp2\" (UniqueName: \"kubernetes.io/projected/a5213e13-fc34-444f-91d3-df6d09816a68-kube-api-access-mljp2\") on node \"crc\" DevicePath \"\""
Nov 22 10:50:57 crc kubenswrapper[4926]: I1122 10:50:57.264062 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772enqqjf" event={"ID":"a5213e13-fc34-444f-91d3-df6d09816a68","Type":"ContainerDied","Data":"16b70799e2fc6cbc5e96b4e3e650666f0a8d4ba090695651e330a817af1427e5"}
Nov 22 10:50:57 crc kubenswrapper[4926]: I1122 10:50:57.264149 4926 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="16b70799e2fc6cbc5e96b4e3e650666f0a8d4ba090695651e330a817af1427e5"
Nov 22 10:50:57 crc kubenswrapper[4926]: I1122 10:50:57.264188 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772enqqjf"
Nov 22 10:50:59 crc kubenswrapper[4926]: I1122 10:50:59.496774 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-operator-557fdffb88-2m244"]
Nov 22 10:50:59 crc kubenswrapper[4926]: E1122 10:50:59.497309 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a5213e13-fc34-444f-91d3-df6d09816a68" containerName="pull"
Nov 22 10:50:59 crc kubenswrapper[4926]: I1122 10:50:59.497322 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="a5213e13-fc34-444f-91d3-df6d09816a68" containerName="pull"
Nov 22 10:50:59 crc kubenswrapper[4926]: E1122 10:50:59.497335 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a5213e13-fc34-444f-91d3-df6d09816a68" containerName="util"
Nov 22 10:50:59 crc kubenswrapper[4926]: I1122 10:50:59.497341 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="a5213e13-fc34-444f-91d3-df6d09816a68" containerName="util"
Nov 22 10:50:59 crc kubenswrapper[4926]: E1122 10:50:59.497350 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a5213e13-fc34-444f-91d3-df6d09816a68" containerName="extract"
Nov 22 10:50:59 crc kubenswrapper[4926]: I1122 10:50:59.497357 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="a5213e13-fc34-444f-91d3-df6d09816a68" containerName="extract"
Nov 22 10:50:59 crc kubenswrapper[4926]: I1122 10:50:59.497470 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="a5213e13-fc34-444f-91d3-df6d09816a68" containerName="extract"
Nov 22 10:50:59 crc kubenswrapper[4926]: I1122 10:50:59.497799 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-operator-557fdffb88-2m244"
Nov 22 10:50:59 crc kubenswrapper[4926]: I1122 10:50:59.500837 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-nmstate"/"kube-root-ca.crt"
Nov 22 10:50:59 crc kubenswrapper[4926]: I1122 10:50:59.500956 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-nmstate"/"openshift-service-ca.crt"
Nov 22 10:50:59 crc kubenswrapper[4926]: I1122 10:50:59.501058 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"nmstate-operator-dockercfg-zt7fw"
Nov 22 10:50:59 crc kubenswrapper[4926]: I1122 10:50:59.511754 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-operator-557fdffb88-2m244"]
Nov 22 10:50:59 crc kubenswrapper[4926]: I1122 10:50:59.629561 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xkb5k\" (UniqueName: \"kubernetes.io/projected/251bb93a-68e8-4e17-98ac-0dc9c7f31ace-kube-api-access-xkb5k\") pod \"nmstate-operator-557fdffb88-2m244\" (UID: \"251bb93a-68e8-4e17-98ac-0dc9c7f31ace\") " pod="openshift-nmstate/nmstate-operator-557fdffb88-2m244"
Nov 22 10:50:59 crc kubenswrapper[4926]: I1122 10:50:59.731230 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xkb5k\" (UniqueName: \"kubernetes.io/projected/251bb93a-68e8-4e17-98ac-0dc9c7f31ace-kube-api-access-xkb5k\") pod \"nmstate-operator-557fdffb88-2m244\" (UID: \"251bb93a-68e8-4e17-98ac-0dc9c7f31ace\") " pod="openshift-nmstate/nmstate-operator-557fdffb88-2m244"
Nov 22 10:50:59 crc kubenswrapper[4926]: I1122 10:50:59.748708 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xkb5k\" (UniqueName: \"kubernetes.io/projected/251bb93a-68e8-4e17-98ac-0dc9c7f31ace-kube-api-access-xkb5k\") pod \"nmstate-operator-557fdffb88-2m244\" (UID: \"251bb93a-68e8-4e17-98ac-0dc9c7f31ace\") " pod="openshift-nmstate/nmstate-operator-557fdffb88-2m244"
Nov 22 10:50:59 crc kubenswrapper[4926]: I1122 10:50:59.812048 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-operator-557fdffb88-2m244"
Nov 22 10:51:00 crc kubenswrapper[4926]: I1122 10:51:00.017145 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-operator-557fdffb88-2m244"]
Nov 22 10:51:00 crc kubenswrapper[4926]: I1122 10:51:00.283306 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-operator-557fdffb88-2m244" event={"ID":"251bb93a-68e8-4e17-98ac-0dc9c7f31ace","Type":"ContainerStarted","Data":"5e68d0e1c72509453907f817a5e0fd0ef1329fafe56513680c86eaa8c14d16c5"}
Nov 22 10:51:02 crc kubenswrapper[4926]: I1122 10:51:02.300743 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-operator-557fdffb88-2m244" event={"ID":"251bb93a-68e8-4e17-98ac-0dc9c7f31ace","Type":"ContainerStarted","Data":"18437732bfe8c334c7609d6adf25d580c80521b7c368ca837036d25f959b4c60"}
Nov 22 10:51:03 crc kubenswrapper[4926]: I1122 10:51:03.163477 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-operator-557fdffb88-2m244" podStartSLOduration=2.629444646 podStartE2EDuration="4.163457408s" podCreationTimestamp="2025-11-22 10:50:59 +0000 UTC" firstStartedPulling="2025-11-22 10:51:00.02965669 +0000 UTC m=+680.331261977" lastFinishedPulling="2025-11-22 10:51:01.563669452 +0000 UTC m=+681.865274739" observedRunningTime="2025-11-22 10:51:02.340145232 +0000 UTC m=+682.641750549" watchObservedRunningTime="2025-11-22 10:51:03.163457408 +0000 UTC m=+683.465062695"
Nov 22 10:51:03 crc kubenswrapper[4926]: I1122 10:51:03.165055 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-metrics-5dcf9c57c5-vfdhw"]
Nov 22 10:51:03 crc kubenswrapper[4926]: I1122 10:51:03.166007 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-metrics-5dcf9c57c5-vfdhw"
Nov 22 10:51:03 crc kubenswrapper[4926]: I1122 10:51:03.169640 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"nmstate-handler-dockercfg-2wzcg"
Nov 22 10:51:03 crc kubenswrapper[4926]: I1122 10:51:03.178651 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-webhook-6b89b748d8-zttzw"]
Nov 22 10:51:03 crc kubenswrapper[4926]: I1122 10:51:03.179398 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-webhook-6b89b748d8-zttzw"
Nov 22 10:51:03 crc kubenswrapper[4926]: I1122 10:51:03.182967 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"openshift-nmstate-webhook"
Nov 22 10:51:03 crc kubenswrapper[4926]: I1122 10:51:03.189348 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-metrics-5dcf9c57c5-vfdhw"]
Nov 22 10:51:03 crc kubenswrapper[4926]: I1122 10:51:03.196771 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-webhook-6b89b748d8-zttzw"]
Nov 22 10:51:03 crc kubenswrapper[4926]: I1122 10:51:03.206243 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-handler-lt5kb"]
Nov 22 10:51:03 crc kubenswrapper[4926]: I1122 10:51:03.207302 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-handler-lt5kb"
Nov 22 10:51:03 crc kubenswrapper[4926]: I1122 10:51:03.285585 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tq9k2\" (UniqueName: \"kubernetes.io/projected/1b73e906-db9e-454d-8316-2266a666d683-kube-api-access-tq9k2\") pod \"nmstate-metrics-5dcf9c57c5-vfdhw\" (UID: \"1b73e906-db9e-454d-8316-2266a666d683\") " pod="openshift-nmstate/nmstate-metrics-5dcf9c57c5-vfdhw"
Nov 22 10:51:03 crc kubenswrapper[4926]: I1122 10:51:03.285652 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/c24318e9-ff38-4221-8931-046cb1c39368-tls-key-pair\") pod \"nmstate-webhook-6b89b748d8-zttzw\" (UID: \"c24318e9-ff38-4221-8931-046cb1c39368\") " pod="openshift-nmstate/nmstate-webhook-6b89b748d8-zttzw"
Nov 22 10:51:03 crc kubenswrapper[4926]: I1122 10:51:03.285680 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nmstate-lock\" (UniqueName: \"kubernetes.io/host-path/cae34914-f89d-4a66-bb66-901024424e79-nmstate-lock\") pod \"nmstate-handler-lt5kb\" (UID: \"cae34914-f89d-4a66-bb66-901024424e79\") " pod="openshift-nmstate/nmstate-handler-lt5kb"
Nov 22 10:51:03 crc kubenswrapper[4926]: I1122 10:51:03.285809 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovs-socket\" (UniqueName: \"kubernetes.io/host-path/cae34914-f89d-4a66-bb66-901024424e79-ovs-socket\") pod \"nmstate-handler-lt5kb\" (UID: \"cae34914-f89d-4a66-bb66-901024424e79\") " pod="openshift-nmstate/nmstate-handler-lt5kb"
Nov 22 10:51:03 crc kubenswrapper[4926]: I1122 10:51:03.285877 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dwmgn\" (UniqueName: \"kubernetes.io/projected/c24318e9-ff38-4221-8931-046cb1c39368-kube-api-access-dwmgn\") pod \"nmstate-webhook-6b89b748d8-zttzw\" (UID: \"c24318e9-ff38-4221-8931-046cb1c39368\") " pod="openshift-nmstate/nmstate-webhook-6b89b748d8-zttzw"
Nov 22 10:51:03 crc kubenswrapper[4926]: I1122 10:51:03.285938 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6wgf8\" (UniqueName: \"kubernetes.io/projected/cae34914-f89d-4a66-bb66-901024424e79-kube-api-access-6wgf8\") pod \"nmstate-handler-lt5kb\" (UID: \"cae34914-f89d-4a66-bb66-901024424e79\") " pod="openshift-nmstate/nmstate-handler-lt5kb"
Nov 22 10:51:03 crc kubenswrapper[4926]: I1122 10:51:03.285983 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dbus-socket\" (UniqueName: \"kubernetes.io/host-path/cae34914-f89d-4a66-bb66-901024424e79-dbus-socket\") pod \"nmstate-handler-lt5kb\" (UID: \"cae34914-f89d-4a66-bb66-901024424e79\") " pod="openshift-nmstate/nmstate-handler-lt5kb"
Nov 22 10:51:03 crc kubenswrapper[4926]: I1122 10:51:03.319298 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-console-plugin-5874bd7bc5-slspm"]
Nov 22 10:51:03 crc kubenswrapper[4926]: I1122 10:51:03.320034 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-slspm"
Nov 22 10:51:03 crc kubenswrapper[4926]: I1122 10:51:03.322431 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"plugin-serving-cert"
Nov 22 10:51:03 crc kubenswrapper[4926]: I1122 10:51:03.322531 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"default-dockercfg-g9bz7"
Nov 22 10:51:03 crc kubenswrapper[4926]: I1122 10:51:03.322651 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-nmstate"/"nginx-conf"
Nov 22 10:51:03 crc kubenswrapper[4926]: I1122 10:51:03.333446 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-console-plugin-5874bd7bc5-slspm"]
Nov 22 10:51:03 crc kubenswrapper[4926]: I1122 10:51:03.386628 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-j68kr\" (UniqueName: \"kubernetes.io/projected/1d24ff8c-3a27-452a-a473-90e139c30740-kube-api-access-j68kr\") pod \"nmstate-console-plugin-5874bd7bc5-slspm\" (UID: \"1d24ff8c-3a27-452a-a473-90e139c30740\") " pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-slspm"
Nov 22 10:51:03 crc kubenswrapper[4926]: I1122 10:51:03.386690 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6wgf8\" (UniqueName: \"kubernetes.io/projected/cae34914-f89d-4a66-bb66-901024424e79-kube-api-access-6wgf8\") pod \"nmstate-handler-lt5kb\" (UID: \"cae34914-f89d-4a66-bb66-901024424e79\") " pod="openshift-nmstate/nmstate-handler-lt5kb"
Nov 22 10:51:03 crc kubenswrapper[4926]: I1122 10:51:03.386728 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dbus-socket\" (UniqueName: \"kubernetes.io/host-path/cae34914-f89d-4a66-bb66-901024424e79-dbus-socket\") pod \"nmstate-handler-lt5kb\" (UID: \"cae34914-f89d-4a66-bb66-901024424e79\") " pod="openshift-nmstate/nmstate-handler-lt5kb"
Nov 22 10:51:03 crc kubenswrapper[4926]: I1122 10:51:03.386776 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/1d24ff8c-3a27-452a-a473-90e139c30740-plugin-serving-cert\") pod \"nmstate-console-plugin-5874bd7bc5-slspm\" (UID: \"1d24ff8c-3a27-452a-a473-90e139c30740\") " pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-slspm"
Nov 22 10:51:03 crc kubenswrapper[4926]: I1122 10:51:03.387125 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dbus-socket\" (UniqueName: \"kubernetes.io/host-path/cae34914-f89d-4a66-bb66-901024424e79-dbus-socket\") pod \"nmstate-handler-lt5kb\" (UID: \"cae34914-f89d-4a66-bb66-901024424e79\") " pod="openshift-nmstate/nmstate-handler-lt5kb"
Nov 22 10:51:03 crc kubenswrapper[4926]: I1122 10:51:03.387162 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tq9k2\" (UniqueName: \"kubernetes.io/projected/1b73e906-db9e-454d-8316-2266a666d683-kube-api-access-tq9k2\") pod \"nmstate-metrics-5dcf9c57c5-vfdhw\" (UID: \"1b73e906-db9e-454d-8316-2266a666d683\") " pod="openshift-nmstate/nmstate-metrics-5dcf9c57c5-vfdhw"
Nov 22 10:51:03 crc kubenswrapper[4926]: I1122 10:51:03.387199 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/1d24ff8c-3a27-452a-a473-90e139c30740-nginx-conf\") pod \"nmstate-console-plugin-5874bd7bc5-slspm\" (UID: \"1d24ff8c-3a27-452a-a473-90e139c30740\") " pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-slspm"
Nov 22 10:51:03 crc kubenswrapper[4926]: I1122 10:51:03.387217 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/c24318e9-ff38-4221-8931-046cb1c39368-tls-key-pair\") pod \"nmstate-webhook-6b89b748d8-zttzw\" (UID: \"c24318e9-ff38-4221-8931-046cb1c39368\") " pod="openshift-nmstate/nmstate-webhook-6b89b748d8-zttzw"
Nov 22 10:51:03 crc kubenswrapper[4926]: I1122 10:51:03.387234 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nmstate-lock\" (UniqueName: \"kubernetes.io/host-path/cae34914-f89d-4a66-bb66-901024424e79-nmstate-lock\") pod \"nmstate-handler-lt5kb\" (UID: \"cae34914-f89d-4a66-bb66-901024424e79\") " pod="openshift-nmstate/nmstate-handler-lt5kb"
Nov 22 10:51:03 crc kubenswrapper[4926]: E1122 10:51:03.387318 4926 secret.go:188] Couldn't get secret openshift-nmstate/openshift-nmstate-webhook: secret "openshift-nmstate-webhook" not found
Nov 22 10:51:03 crc kubenswrapper[4926]: I1122 10:51:03.387454 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovs-socket\" (UniqueName: \"kubernetes.io/host-path/cae34914-f89d-4a66-bb66-901024424e79-ovs-socket\") pod \"nmstate-handler-lt5kb\" (UID: \"cae34914-f89d-4a66-bb66-901024424e79\") " pod="openshift-nmstate/nmstate-handler-lt5kb"
Nov 22 10:51:03 crc kubenswrapper[4926]: E1122 10:51:03.387483 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/c24318e9-ff38-4221-8931-046cb1c39368-tls-key-pair podName:c24318e9-ff38-4221-8931-046cb1c39368 nodeName:}" failed. No retries permitted until 2025-11-22 10:51:03.887462303 +0000 UTC m=+684.189067590 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "tls-key-pair" (UniqueName: "kubernetes.io/secret/c24318e9-ff38-4221-8931-046cb1c39368-tls-key-pair") pod "nmstate-webhook-6b89b748d8-zttzw" (UID: "c24318e9-ff38-4221-8931-046cb1c39368") : secret "openshift-nmstate-webhook" not found
Nov 22 10:51:03 crc kubenswrapper[4926]: I1122 10:51:03.387423 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovs-socket\" (UniqueName: \"kubernetes.io/host-path/cae34914-f89d-4a66-bb66-901024424e79-ovs-socket\") pod \"nmstate-handler-lt5kb\" (UID: \"cae34914-f89d-4a66-bb66-901024424e79\") " pod="openshift-nmstate/nmstate-handler-lt5kb"
Nov 22 10:51:03 crc kubenswrapper[4926]: I1122 10:51:03.387327 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nmstate-lock\" (UniqueName: \"kubernetes.io/host-path/cae34914-f89d-4a66-bb66-901024424e79-nmstate-lock\") pod \"nmstate-handler-lt5kb\" (UID: \"cae34914-f89d-4a66-bb66-901024424e79\") " pod="openshift-nmstate/nmstate-handler-lt5kb"
Nov 22 10:51:03 crc kubenswrapper[4926]: I1122 10:51:03.387560 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dwmgn\" (UniqueName: \"kubernetes.io/projected/c24318e9-ff38-4221-8931-046cb1c39368-kube-api-access-dwmgn\") pod \"nmstate-webhook-6b89b748d8-zttzw\" (UID: \"c24318e9-ff38-4221-8931-046cb1c39368\") " pod="openshift-nmstate/nmstate-webhook-6b89b748d8-zttzw"
Nov 22 10:51:03 crc kubenswrapper[4926]: I1122 10:51:03.408001 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6wgf8\" (UniqueName: \"kubernetes.io/projected/cae34914-f89d-4a66-bb66-901024424e79-kube-api-access-6wgf8\") pod \"nmstate-handler-lt5kb\" (UID: \"cae34914-f89d-4a66-bb66-901024424e79\") " pod="openshift-nmstate/nmstate-handler-lt5kb"
Nov 22 10:51:03 crc kubenswrapper[4926]: I1122 10:51:03.408393 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dwmgn\" (UniqueName: \"kubernetes.io/projected/c24318e9-ff38-4221-8931-046cb1c39368-kube-api-access-dwmgn\") pod \"nmstate-webhook-6b89b748d8-zttzw\" (UID: \"c24318e9-ff38-4221-8931-046cb1c39368\") " pod="openshift-nmstate/nmstate-webhook-6b89b748d8-zttzw"
Nov 22 10:51:03 crc kubenswrapper[4926]: I1122 10:51:03.409322 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tq9k2\" (UniqueName: \"kubernetes.io/projected/1b73e906-db9e-454d-8316-2266a666d683-kube-api-access-tq9k2\") pod \"nmstate-metrics-5dcf9c57c5-vfdhw\" (UID: \"1b73e906-db9e-454d-8316-2266a666d683\") " pod="openshift-nmstate/nmstate-metrics-5dcf9c57c5-vfdhw"
Nov 22 10:51:03 crc kubenswrapper[4926]: I1122 10:51:03.483258 4926 util.go:30] "No sandbox for pod can be found.
Need to start a new one" pod="openshift-nmstate/nmstate-metrics-5dcf9c57c5-vfdhw" Nov 22 10:51:03 crc kubenswrapper[4926]: I1122 10:51:03.488636 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-j68kr\" (UniqueName: \"kubernetes.io/projected/1d24ff8c-3a27-452a-a473-90e139c30740-kube-api-access-j68kr\") pod \"nmstate-console-plugin-5874bd7bc5-slspm\" (UID: \"1d24ff8c-3a27-452a-a473-90e139c30740\") " pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-slspm" Nov 22 10:51:03 crc kubenswrapper[4926]: I1122 10:51:03.488695 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/1d24ff8c-3a27-452a-a473-90e139c30740-plugin-serving-cert\") pod \"nmstate-console-plugin-5874bd7bc5-slspm\" (UID: \"1d24ff8c-3a27-452a-a473-90e139c30740\") " pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-slspm" Nov 22 10:51:03 crc kubenswrapper[4926]: I1122 10:51:03.488752 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/1d24ff8c-3a27-452a-a473-90e139c30740-nginx-conf\") pod \"nmstate-console-plugin-5874bd7bc5-slspm\" (UID: \"1d24ff8c-3a27-452a-a473-90e139c30740\") " pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-slspm" Nov 22 10:51:03 crc kubenswrapper[4926]: I1122 10:51:03.489618 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/1d24ff8c-3a27-452a-a473-90e139c30740-nginx-conf\") pod \"nmstate-console-plugin-5874bd7bc5-slspm\" (UID: \"1d24ff8c-3a27-452a-a473-90e139c30740\") " pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-slspm" Nov 22 10:51:03 crc kubenswrapper[4926]: I1122 10:51:03.494646 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/1d24ff8c-3a27-452a-a473-90e139c30740-plugin-serving-cert\") pod \"nmstate-console-plugin-5874bd7bc5-slspm\" (UID: \"1d24ff8c-3a27-452a-a473-90e139c30740\") " pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-slspm" Nov 22 10:51:03 crc kubenswrapper[4926]: I1122 10:51:03.511743 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/console-6b7567bc44-sffc8"] Nov 22 10:51:03 crc kubenswrapper[4926]: I1122 10:51:03.512847 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-6b7567bc44-sffc8" Nov 22 10:51:03 crc kubenswrapper[4926]: I1122 10:51:03.515641 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-j68kr\" (UniqueName: \"kubernetes.io/projected/1d24ff8c-3a27-452a-a473-90e139c30740-kube-api-access-j68kr\") pod \"nmstate-console-plugin-5874bd7bc5-slspm\" (UID: \"1d24ff8c-3a27-452a-a473-90e139c30740\") " pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-slspm" Nov 22 10:51:03 crc kubenswrapper[4926]: I1122 10:51:03.526122 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-handler-lt5kb" Nov 22 10:51:03 crc kubenswrapper[4926]: I1122 10:51:03.529758 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-6b7567bc44-sffc8"] Nov 22 10:51:03 crc kubenswrapper[4926]: I1122 10:51:03.590257 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/05d9bb70-c59a-4466-8c92-14cfb8793f85-console-config\") pod \"console-6b7567bc44-sffc8\" (UID: \"05d9bb70-c59a-4466-8c92-14cfb8793f85\") " pod="openshift-console/console-6b7567bc44-sffc8" Nov 22 10:51:03 crc kubenswrapper[4926]: I1122 10:51:03.590306 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/05d9bb70-c59a-4466-8c92-14cfb8793f85-oauth-serving-cert\") pod \"console-6b7567bc44-sffc8\" (UID: \"05d9bb70-c59a-4466-8c92-14cfb8793f85\") " pod="openshift-console/console-6b7567bc44-sffc8" Nov 22 10:51:03 crc kubenswrapper[4926]: I1122 10:51:03.590333 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kdvxg\" (UniqueName: \"kubernetes.io/projected/05d9bb70-c59a-4466-8c92-14cfb8793f85-kube-api-access-kdvxg\") pod \"console-6b7567bc44-sffc8\" (UID: \"05d9bb70-c59a-4466-8c92-14cfb8793f85\") " pod="openshift-console/console-6b7567bc44-sffc8" Nov 22 10:51:03 crc kubenswrapper[4926]: I1122 10:51:03.590364 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/05d9bb70-c59a-4466-8c92-14cfb8793f85-trusted-ca-bundle\") pod \"console-6b7567bc44-sffc8\" (UID: \"05d9bb70-c59a-4466-8c92-14cfb8793f85\") " pod="openshift-console/console-6b7567bc44-sffc8" Nov 22 10:51:03 crc kubenswrapper[4926]: I1122 10:51:03.590421 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/05d9bb70-c59a-4466-8c92-14cfb8793f85-console-serving-cert\") pod \"console-6b7567bc44-sffc8\" (UID: \"05d9bb70-c59a-4466-8c92-14cfb8793f85\") " pod="openshift-console/console-6b7567bc44-sffc8" Nov 22 10:51:03 crc kubenswrapper[4926]: I1122 10:51:03.590488 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/05d9bb70-c59a-4466-8c92-14cfb8793f85-console-oauth-config\") pod \"console-6b7567bc44-sffc8\" (UID: \"05d9bb70-c59a-4466-8c92-14cfb8793f85\") " pod="openshift-console/console-6b7567bc44-sffc8" Nov 22 10:51:03 crc kubenswrapper[4926]: I1122 10:51:03.590515 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/05d9bb70-c59a-4466-8c92-14cfb8793f85-service-ca\") pod \"console-6b7567bc44-sffc8\" (UID: \"05d9bb70-c59a-4466-8c92-14cfb8793f85\") " pod="openshift-console/console-6b7567bc44-sffc8" Nov 22 10:51:03 crc kubenswrapper[4926]: I1122 10:51:03.637187 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-slspm" Nov 22 10:51:03 crc kubenswrapper[4926]: I1122 10:51:03.693759 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/05d9bb70-c59a-4466-8c92-14cfb8793f85-service-ca\") pod \"console-6b7567bc44-sffc8\" (UID: \"05d9bb70-c59a-4466-8c92-14cfb8793f85\") " pod="openshift-console/console-6b7567bc44-sffc8" Nov 22 10:51:03 crc kubenswrapper[4926]: I1122 10:51:03.693888 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/05d9bb70-c59a-4466-8c92-14cfb8793f85-console-config\") pod \"console-6b7567bc44-sffc8\" (UID: \"05d9bb70-c59a-4466-8c92-14cfb8793f85\") " pod="openshift-console/console-6b7567bc44-sffc8" Nov 22 10:51:03 crc kubenswrapper[4926]: I1122 10:51:03.693925 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/05d9bb70-c59a-4466-8c92-14cfb8793f85-oauth-serving-cert\") pod \"console-6b7567bc44-sffc8\" (UID: \"05d9bb70-c59a-4466-8c92-14cfb8793f85\") " pod="openshift-console/console-6b7567bc44-sffc8" Nov 22 10:51:03 crc kubenswrapper[4926]: I1122 10:51:03.693955 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kdvxg\" (UniqueName: \"kubernetes.io/projected/05d9bb70-c59a-4466-8c92-14cfb8793f85-kube-api-access-kdvxg\") pod \"console-6b7567bc44-sffc8\" (UID: \"05d9bb70-c59a-4466-8c92-14cfb8793f85\") " pod="openshift-console/console-6b7567bc44-sffc8" Nov 22 10:51:03 crc kubenswrapper[4926]: I1122 10:51:03.693976 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/05d9bb70-c59a-4466-8c92-14cfb8793f85-trusted-ca-bundle\") pod \"console-6b7567bc44-sffc8\" (UID: \"05d9bb70-c59a-4466-8c92-14cfb8793f85\") " pod="openshift-console/console-6b7567bc44-sffc8" Nov 22 10:51:03 crc kubenswrapper[4926]: I1122 10:51:03.694019 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/05d9bb70-c59a-4466-8c92-14cfb8793f85-console-serving-cert\") pod \"console-6b7567bc44-sffc8\" (UID: \"05d9bb70-c59a-4466-8c92-14cfb8793f85\") " pod="openshift-console/console-6b7567bc44-sffc8" Nov 22 10:51:03 crc kubenswrapper[4926]: I1122 10:51:03.694097 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/05d9bb70-c59a-4466-8c92-14cfb8793f85-console-oauth-config\") pod \"console-6b7567bc44-sffc8\" (UID: \"05d9bb70-c59a-4466-8c92-14cfb8793f85\") " pod="openshift-console/console-6b7567bc44-sffc8" Nov 22 10:51:03 crc kubenswrapper[4926]: I1122 10:51:03.695564 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/05d9bb70-c59a-4466-8c92-14cfb8793f85-service-ca\") pod \"console-6b7567bc44-sffc8\" (UID: \"05d9bb70-c59a-4466-8c92-14cfb8793f85\") " pod="openshift-console/console-6b7567bc44-sffc8" Nov 22 10:51:03 crc kubenswrapper[4926]: I1122 10:51:03.699736 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/05d9bb70-c59a-4466-8c92-14cfb8793f85-console-config\") pod \"console-6b7567bc44-sffc8\" (UID: \"05d9bb70-c59a-4466-8c92-14cfb8793f85\") " 
pod="openshift-console/console-6b7567bc44-sffc8" Nov 22 10:51:03 crc kubenswrapper[4926]: I1122 10:51:03.699819 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/05d9bb70-c59a-4466-8c92-14cfb8793f85-trusted-ca-bundle\") pod \"console-6b7567bc44-sffc8\" (UID: \"05d9bb70-c59a-4466-8c92-14cfb8793f85\") " pod="openshift-console/console-6b7567bc44-sffc8" Nov 22 10:51:03 crc kubenswrapper[4926]: I1122 10:51:03.700354 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/05d9bb70-c59a-4466-8c92-14cfb8793f85-oauth-serving-cert\") pod \"console-6b7567bc44-sffc8\" (UID: \"05d9bb70-c59a-4466-8c92-14cfb8793f85\") " pod="openshift-console/console-6b7567bc44-sffc8" Nov 22 10:51:03 crc kubenswrapper[4926]: I1122 10:51:03.719695 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/05d9bb70-c59a-4466-8c92-14cfb8793f85-console-oauth-config\") pod \"console-6b7567bc44-sffc8\" (UID: \"05d9bb70-c59a-4466-8c92-14cfb8793f85\") " pod="openshift-console/console-6b7567bc44-sffc8" Nov 22 10:51:03 crc kubenswrapper[4926]: I1122 10:51:03.720393 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/05d9bb70-c59a-4466-8c92-14cfb8793f85-console-serving-cert\") pod \"console-6b7567bc44-sffc8\" (UID: \"05d9bb70-c59a-4466-8c92-14cfb8793f85\") " pod="openshift-console/console-6b7567bc44-sffc8" Nov 22 10:51:03 crc kubenswrapper[4926]: I1122 10:51:03.727627 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kdvxg\" (UniqueName: \"kubernetes.io/projected/05d9bb70-c59a-4466-8c92-14cfb8793f85-kube-api-access-kdvxg\") pod \"console-6b7567bc44-sffc8\" (UID: \"05d9bb70-c59a-4466-8c92-14cfb8793f85\") " pod="openshift-console/console-6b7567bc44-sffc8" Nov 22 10:51:03 crc kubenswrapper[4926]: I1122 10:51:03.762622 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-metrics-5dcf9c57c5-vfdhw"] Nov 22 10:51:03 crc kubenswrapper[4926]: W1122 10:51:03.769822 4926 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod1b73e906_db9e_454d_8316_2266a666d683.slice/crio-62e3d2ce83532363cca7307b13a9acf8e5af8998e443a9adf19996eda087ef6f WatchSource:0}: Error finding container 62e3d2ce83532363cca7307b13a9acf8e5af8998e443a9adf19996eda087ef6f: Status 404 returned error can't find the container with id 62e3d2ce83532363cca7307b13a9acf8e5af8998e443a9adf19996eda087ef6f Nov 22 10:51:03 crc kubenswrapper[4926]: I1122 10:51:03.862346 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/console-6b7567bc44-sffc8" Nov 22 10:51:03 crc kubenswrapper[4926]: I1122 10:51:03.896698 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/c24318e9-ff38-4221-8931-046cb1c39368-tls-key-pair\") pod \"nmstate-webhook-6b89b748d8-zttzw\" (UID: \"c24318e9-ff38-4221-8931-046cb1c39368\") " pod="openshift-nmstate/nmstate-webhook-6b89b748d8-zttzw" Nov 22 10:51:03 crc kubenswrapper[4926]: I1122 10:51:03.901508 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/c24318e9-ff38-4221-8931-046cb1c39368-tls-key-pair\") pod \"nmstate-webhook-6b89b748d8-zttzw\" (UID: \"c24318e9-ff38-4221-8931-046cb1c39368\") " pod="openshift-nmstate/nmstate-webhook-6b89b748d8-zttzw" Nov 22 10:51:04 crc kubenswrapper[4926]: I1122 10:51:04.096089 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-webhook-6b89b748d8-zttzw" Nov 22 10:51:04 crc kubenswrapper[4926]: I1122 10:51:04.112731 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-6b7567bc44-sffc8"] Nov 22 10:51:04 crc kubenswrapper[4926]: W1122 10:51:04.119655 4926 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod05d9bb70_c59a_4466_8c92_14cfb8793f85.slice/crio-e7d37fe458d859421dffb4218648884c8e5d74fa0751cfaba99d0d8ec5c0d582 WatchSource:0}: Error finding container e7d37fe458d859421dffb4218648884c8e5d74fa0751cfaba99d0d8ec5c0d582: Status 404 returned error can't find the container with id e7d37fe458d859421dffb4218648884c8e5d74fa0751cfaba99d0d8ec5c0d582 Nov 22 10:51:04 crc kubenswrapper[4926]: I1122 10:51:04.163116 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-console-plugin-5874bd7bc5-slspm"] Nov 22 10:51:04 crc kubenswrapper[4926]: I1122 10:51:04.278324 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-webhook-6b89b748d8-zttzw"] Nov 22 10:51:04 crc kubenswrapper[4926]: W1122 10:51:04.285129 4926 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podc24318e9_ff38_4221_8931_046cb1c39368.slice/crio-f42c52743d955ebb21357a95e8c0b92aef7dfb642a80f45c3d059897c48919f1 WatchSource:0}: Error finding container f42c52743d955ebb21357a95e8c0b92aef7dfb642a80f45c3d059897c48919f1: Status 404 returned error can't find the container with id f42c52743d955ebb21357a95e8c0b92aef7dfb642a80f45c3d059897c48919f1 Nov 22 10:51:04 crc kubenswrapper[4926]: I1122 10:51:04.312368 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-metrics-5dcf9c57c5-vfdhw" event={"ID":"1b73e906-db9e-454d-8316-2266a666d683","Type":"ContainerStarted","Data":"62e3d2ce83532363cca7307b13a9acf8e5af8998e443a9adf19996eda087ef6f"} Nov 22 10:51:04 crc kubenswrapper[4926]: I1122 10:51:04.313606 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-6b7567bc44-sffc8" event={"ID":"05d9bb70-c59a-4466-8c92-14cfb8793f85","Type":"ContainerStarted","Data":"f6627df672b06b0fe4e8b79665408b68802219568b6f67b15f08b72a7603b818"} Nov 22 10:51:04 crc kubenswrapper[4926]: I1122 10:51:04.313667 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-6b7567bc44-sffc8" 
event={"ID":"05d9bb70-c59a-4466-8c92-14cfb8793f85","Type":"ContainerStarted","Data":"e7d37fe458d859421dffb4218648884c8e5d74fa0751cfaba99d0d8ec5c0d582"} Nov 22 10:51:04 crc kubenswrapper[4926]: I1122 10:51:04.314588 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-webhook-6b89b748d8-zttzw" event={"ID":"c24318e9-ff38-4221-8931-046cb1c39368","Type":"ContainerStarted","Data":"f42c52743d955ebb21357a95e8c0b92aef7dfb642a80f45c3d059897c48919f1"} Nov 22 10:51:04 crc kubenswrapper[4926]: I1122 10:51:04.315793 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-slspm" event={"ID":"1d24ff8c-3a27-452a-a473-90e139c30740","Type":"ContainerStarted","Data":"84cd53b2c25854534220d29b7e7e5a7c7eeba28685195d2650af81d7a5c5dbe1"} Nov 22 10:51:04 crc kubenswrapper[4926]: I1122 10:51:04.317392 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-handler-lt5kb" event={"ID":"cae34914-f89d-4a66-bb66-901024424e79","Type":"ContainerStarted","Data":"43cb6783d4fdd4fd30b7b803ab1791625289f9b079f96fef4072321e06713ad2"} Nov 22 10:51:04 crc kubenswrapper[4926]: I1122 10:51:04.332242 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/console-6b7567bc44-sffc8" podStartSLOduration=1.33221908 podStartE2EDuration="1.33221908s" podCreationTimestamp="2025-11-22 10:51:03 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 10:51:04.329380109 +0000 UTC m=+684.630985406" watchObservedRunningTime="2025-11-22 10:51:04.33221908 +0000 UTC m=+684.633824367" Nov 22 10:51:07 crc kubenswrapper[4926]: I1122 10:51:07.344150 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-webhook-6b89b748d8-zttzw" event={"ID":"c24318e9-ff38-4221-8931-046cb1c39368","Type":"ContainerStarted","Data":"99f79d30bc72f1e1d63b9413c8d9e361a165c6bcedb673488461d5a9854f34a0"} Nov 22 10:51:07 crc kubenswrapper[4926]: I1122 10:51:07.344991 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-nmstate/nmstate-webhook-6b89b748d8-zttzw" Nov 22 10:51:07 crc kubenswrapper[4926]: I1122 10:51:07.346916 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-slspm" event={"ID":"1d24ff8c-3a27-452a-a473-90e139c30740","Type":"ContainerStarted","Data":"d883ae3411c43db25e1cbd1e68627812ed81b91cee4d665bf6ebe690f3008b2e"} Nov 22 10:51:07 crc kubenswrapper[4926]: I1122 10:51:07.348686 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-handler-lt5kb" event={"ID":"cae34914-f89d-4a66-bb66-901024424e79","Type":"ContainerStarted","Data":"a1310cb7a3e9b03c1682c1097ab7f6f87e9aaf144570ae03ad1012c98d735077"} Nov 22 10:51:07 crc kubenswrapper[4926]: I1122 10:51:07.348851 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-nmstate/nmstate-handler-lt5kb" Nov 22 10:51:07 crc kubenswrapper[4926]: I1122 10:51:07.350726 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-metrics-5dcf9c57c5-vfdhw" event={"ID":"1b73e906-db9e-454d-8316-2266a666d683","Type":"ContainerStarted","Data":"fd71f7a36c8103e2146afad82925a2ba5aabe40b70383d5d2250736301d64169"} Nov 22 10:51:07 crc kubenswrapper[4926]: I1122 10:51:07.404264 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" 
pod="openshift-nmstate/nmstate-webhook-6b89b748d8-zttzw" podStartSLOduration=2.432918812 podStartE2EDuration="4.404236658s" podCreationTimestamp="2025-11-22 10:51:03 +0000 UTC" firstStartedPulling="2025-11-22 10:51:04.28695813 +0000 UTC m=+684.588563417" lastFinishedPulling="2025-11-22 10:51:06.258275976 +0000 UTC m=+686.559881263" observedRunningTime="2025-11-22 10:51:07.367800339 +0000 UTC m=+687.669405636" watchObservedRunningTime="2025-11-22 10:51:07.404236658 +0000 UTC m=+687.705841965" Nov 22 10:51:07 crc kubenswrapper[4926]: I1122 10:51:07.405656 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-handler-lt5kb" podStartSLOduration=1.709178554 podStartE2EDuration="4.405646138s" podCreationTimestamp="2025-11-22 10:51:03 +0000 UTC" firstStartedPulling="2025-11-22 10:51:03.557678984 +0000 UTC m=+683.859284271" lastFinishedPulling="2025-11-22 10:51:06.254146538 +0000 UTC m=+686.555751855" observedRunningTime="2025-11-22 10:51:07.393850812 +0000 UTC m=+687.695456119" watchObservedRunningTime="2025-11-22 10:51:07.405646138 +0000 UTC m=+687.707251465" Nov 22 10:51:07 crc kubenswrapper[4926]: I1122 10:51:07.413328 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-slspm" podStartSLOduration=2.348326161 podStartE2EDuration="4.413306056s" podCreationTimestamp="2025-11-22 10:51:03 +0000 UTC" firstStartedPulling="2025-11-22 10:51:04.194546576 +0000 UTC m=+684.496151863" lastFinishedPulling="2025-11-22 10:51:06.259526431 +0000 UTC m=+686.561131758" observedRunningTime="2025-11-22 10:51:07.41131618 +0000 UTC m=+687.712921517" watchObservedRunningTime="2025-11-22 10:51:07.413306056 +0000 UTC m=+687.714911343" Nov 22 10:51:08 crc kubenswrapper[4926]: I1122 10:51:08.357527 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-metrics-5dcf9c57c5-vfdhw" event={"ID":"1b73e906-db9e-454d-8316-2266a666d683","Type":"ContainerStarted","Data":"18255010f648a81368fd0e75e9bedc576ac2a4b82e6772c778994ef438329a87"} Nov 22 10:51:08 crc kubenswrapper[4926]: I1122 10:51:08.376805 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-metrics-5dcf9c57c5-vfdhw" podStartSLOduration=0.936107961 podStartE2EDuration="5.376763427s" podCreationTimestamp="2025-11-22 10:51:03 +0000 UTC" firstStartedPulling="2025-11-22 10:51:03.771597852 +0000 UTC m=+684.073203149" lastFinishedPulling="2025-11-22 10:51:08.212253328 +0000 UTC m=+688.513858615" observedRunningTime="2025-11-22 10:51:08.374496112 +0000 UTC m=+688.676101399" watchObservedRunningTime="2025-11-22 10:51:08.376763427 +0000 UTC m=+688.678368714" Nov 22 10:51:13 crc kubenswrapper[4926]: I1122 10:51:13.563390 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-nmstate/nmstate-handler-lt5kb" Nov 22 10:51:13 crc kubenswrapper[4926]: I1122 10:51:13.863394 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/console-6b7567bc44-sffc8" Nov 22 10:51:13 crc kubenswrapper[4926]: I1122 10:51:13.863464 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-console/console-6b7567bc44-sffc8" Nov 22 10:51:13 crc kubenswrapper[4926]: I1122 10:51:13.871585 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-console/console-6b7567bc44-sffc8" Nov 22 10:51:14 crc kubenswrapper[4926]: I1122 10:51:14.405923 4926 kubelet.go:2542] "SyncLoop 
(probe)" probe="readiness" status="ready" pod="openshift-console/console-6b7567bc44-sffc8" Nov 22 10:51:14 crc kubenswrapper[4926]: I1122 10:51:14.457597 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-console/console-f9d7485db-nsj2w"] Nov 22 10:51:24 crc kubenswrapper[4926]: I1122 10:51:24.105852 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-nmstate/nmstate-webhook-6b89b748d8-zttzw" Nov 22 10:51:36 crc kubenswrapper[4926]: I1122 10:51:36.619788 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6lv2rq"] Nov 22 10:51:36 crc kubenswrapper[4926]: I1122 10:51:36.621197 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6lv2rq" Nov 22 10:51:36 crc kubenswrapper[4926]: I1122 10:51:36.624729 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"default-dockercfg-vmwhc" Nov 22 10:51:36 crc kubenswrapper[4926]: I1122 10:51:36.633506 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6lv2rq"] Nov 22 10:51:36 crc kubenswrapper[4926]: I1122 10:51:36.798395 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/de687d1f-94d7-4503-9599-7a43bff94909-util\") pod \"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6lv2rq\" (UID: \"de687d1f-94d7-4503-9599-7a43bff94909\") " pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6lv2rq" Nov 22 10:51:36 crc kubenswrapper[4926]: I1122 10:51:36.798681 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4p84m\" (UniqueName: \"kubernetes.io/projected/de687d1f-94d7-4503-9599-7a43bff94909-kube-api-access-4p84m\") pod \"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6lv2rq\" (UID: \"de687d1f-94d7-4503-9599-7a43bff94909\") " pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6lv2rq" Nov 22 10:51:36 crc kubenswrapper[4926]: I1122 10:51:36.798831 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/de687d1f-94d7-4503-9599-7a43bff94909-bundle\") pod \"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6lv2rq\" (UID: \"de687d1f-94d7-4503-9599-7a43bff94909\") " pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6lv2rq" Nov 22 10:51:36 crc kubenswrapper[4926]: I1122 10:51:36.900485 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/de687d1f-94d7-4503-9599-7a43bff94909-util\") pod \"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6lv2rq\" (UID: \"de687d1f-94d7-4503-9599-7a43bff94909\") " pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6lv2rq" Nov 22 10:51:36 crc kubenswrapper[4926]: I1122 10:51:36.900614 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4p84m\" (UniqueName: \"kubernetes.io/projected/de687d1f-94d7-4503-9599-7a43bff94909-kube-api-access-4p84m\") pod \"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6lv2rq\" (UID: 
\"de687d1f-94d7-4503-9599-7a43bff94909\") " pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6lv2rq" Nov 22 10:51:36 crc kubenswrapper[4926]: I1122 10:51:36.900660 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/de687d1f-94d7-4503-9599-7a43bff94909-bundle\") pod \"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6lv2rq\" (UID: \"de687d1f-94d7-4503-9599-7a43bff94909\") " pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6lv2rq" Nov 22 10:51:36 crc kubenswrapper[4926]: I1122 10:51:36.901560 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/de687d1f-94d7-4503-9599-7a43bff94909-bundle\") pod \"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6lv2rq\" (UID: \"de687d1f-94d7-4503-9599-7a43bff94909\") " pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6lv2rq" Nov 22 10:51:36 crc kubenswrapper[4926]: I1122 10:51:36.901626 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/de687d1f-94d7-4503-9599-7a43bff94909-util\") pod \"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6lv2rq\" (UID: \"de687d1f-94d7-4503-9599-7a43bff94909\") " pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6lv2rq" Nov 22 10:51:36 crc kubenswrapper[4926]: I1122 10:51:36.936170 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4p84m\" (UniqueName: \"kubernetes.io/projected/de687d1f-94d7-4503-9599-7a43bff94909-kube-api-access-4p84m\") pod \"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6lv2rq\" (UID: \"de687d1f-94d7-4503-9599-7a43bff94909\") " pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6lv2rq" Nov 22 10:51:36 crc kubenswrapper[4926]: I1122 10:51:36.940815 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6lv2rq" Nov 22 10:51:37 crc kubenswrapper[4926]: I1122 10:51:37.414803 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6lv2rq"] Nov 22 10:51:37 crc kubenswrapper[4926]: I1122 10:51:37.563760 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6lv2rq" event={"ID":"de687d1f-94d7-4503-9599-7a43bff94909","Type":"ContainerStarted","Data":"5b2cc070de57fdc6fdf8adba0e70b8a6fb31d834d8d14d25d0842d314561cb62"} Nov 22 10:51:38 crc kubenswrapper[4926]: I1122 10:51:38.572698 4926 generic.go:334] "Generic (PLEG): container finished" podID="de687d1f-94d7-4503-9599-7a43bff94909" containerID="2d47f61aec78b8cc66337c2b4121004184bd4fce1a4fb28793608d36aaddf02e" exitCode=0 Nov 22 10:51:38 crc kubenswrapper[4926]: I1122 10:51:38.572774 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6lv2rq" event={"ID":"de687d1f-94d7-4503-9599-7a43bff94909","Type":"ContainerDied","Data":"2d47f61aec78b8cc66337c2b4121004184bd4fce1a4fb28793608d36aaddf02e"} Nov 22 10:51:39 crc kubenswrapper[4926]: I1122 10:51:39.497396 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-console/console-f9d7485db-nsj2w" podUID="ae977eb4-8273-4dab-9e39-80c36ccd63e2" containerName="console" containerID="cri-o://cb7a55cbc2e4bc4cdf65ef747993bfb4d39e1d913f9c5d25263a88a4044fdfff" gracePeriod=15 Nov 22 10:51:39 crc kubenswrapper[4926]: I1122 10:51:39.660829 4926 patch_prober.go:28] interesting pod/machine-config-daemon-xr9nd container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 22 10:51:39 crc kubenswrapper[4926]: I1122 10:51:39.661146 4926 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" podUID="d4977b14-85c3-4141-9b15-1768f09e8d27" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 22 10:51:39 crc kubenswrapper[4926]: I1122 10:51:39.895329 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-console_console-f9d7485db-nsj2w_ae977eb4-8273-4dab-9e39-80c36ccd63e2/console/0.log" Nov 22 10:51:39 crc kubenswrapper[4926]: I1122 10:51:39.895407 4926 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/console-f9d7485db-nsj2w" Nov 22 10:51:40 crc kubenswrapper[4926]: I1122 10:51:40.049347 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/ae977eb4-8273-4dab-9e39-80c36ccd63e2-console-serving-cert\") pod \"ae977eb4-8273-4dab-9e39-80c36ccd63e2\" (UID: \"ae977eb4-8273-4dab-9e39-80c36ccd63e2\") " Nov 22 10:51:40 crc kubenswrapper[4926]: I1122 10:51:40.049463 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/ae977eb4-8273-4dab-9e39-80c36ccd63e2-console-oauth-config\") pod \"ae977eb4-8273-4dab-9e39-80c36ccd63e2\" (UID: \"ae977eb4-8273-4dab-9e39-80c36ccd63e2\") " Nov 22 10:51:40 crc kubenswrapper[4926]: I1122 10:51:40.049505 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/ae977eb4-8273-4dab-9e39-80c36ccd63e2-oauth-serving-cert\") pod \"ae977eb4-8273-4dab-9e39-80c36ccd63e2\" (UID: \"ae977eb4-8273-4dab-9e39-80c36ccd63e2\") " Nov 22 10:51:40 crc kubenswrapper[4926]: I1122 10:51:40.049534 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/ae977eb4-8273-4dab-9e39-80c36ccd63e2-service-ca\") pod \"ae977eb4-8273-4dab-9e39-80c36ccd63e2\" (UID: \"ae977eb4-8273-4dab-9e39-80c36ccd63e2\") " Nov 22 10:51:40 crc kubenswrapper[4926]: I1122 10:51:40.049593 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/ae977eb4-8273-4dab-9e39-80c36ccd63e2-console-config\") pod \"ae977eb4-8273-4dab-9e39-80c36ccd63e2\" (UID: \"ae977eb4-8273-4dab-9e39-80c36ccd63e2\") " Nov 22 10:51:40 crc kubenswrapper[4926]: I1122 10:51:40.050498 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ae977eb4-8273-4dab-9e39-80c36ccd63e2-console-config" (OuterVolumeSpecName: "console-config") pod "ae977eb4-8273-4dab-9e39-80c36ccd63e2" (UID: "ae977eb4-8273-4dab-9e39-80c36ccd63e2"). InnerVolumeSpecName "console-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:51:40 crc kubenswrapper[4926]: I1122 10:51:40.050581 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ae977eb4-8273-4dab-9e39-80c36ccd63e2-service-ca" (OuterVolumeSpecName: "service-ca") pod "ae977eb4-8273-4dab-9e39-80c36ccd63e2" (UID: "ae977eb4-8273-4dab-9e39-80c36ccd63e2"). InnerVolumeSpecName "service-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:51:40 crc kubenswrapper[4926]: I1122 10:51:40.050648 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/ae977eb4-8273-4dab-9e39-80c36ccd63e2-trusted-ca-bundle\") pod \"ae977eb4-8273-4dab-9e39-80c36ccd63e2\" (UID: \"ae977eb4-8273-4dab-9e39-80c36ccd63e2\") " Nov 22 10:51:40 crc kubenswrapper[4926]: I1122 10:51:40.051055 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jkjrs\" (UniqueName: \"kubernetes.io/projected/ae977eb4-8273-4dab-9e39-80c36ccd63e2-kube-api-access-jkjrs\") pod \"ae977eb4-8273-4dab-9e39-80c36ccd63e2\" (UID: \"ae977eb4-8273-4dab-9e39-80c36ccd63e2\") " Nov 22 10:51:40 crc kubenswrapper[4926]: I1122 10:51:40.051608 4926 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/ae977eb4-8273-4dab-9e39-80c36ccd63e2-service-ca\") on node \"crc\" DevicePath \"\"" Nov 22 10:51:40 crc kubenswrapper[4926]: I1122 10:51:40.051647 4926 reconciler_common.go:293] "Volume detached for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/ae977eb4-8273-4dab-9e39-80c36ccd63e2-console-config\") on node \"crc\" DevicePath \"\"" Nov 22 10:51:40 crc kubenswrapper[4926]: I1122 10:51:40.051604 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ae977eb4-8273-4dab-9e39-80c36ccd63e2-oauth-serving-cert" (OuterVolumeSpecName: "oauth-serving-cert") pod "ae977eb4-8273-4dab-9e39-80c36ccd63e2" (UID: "ae977eb4-8273-4dab-9e39-80c36ccd63e2"). InnerVolumeSpecName "oauth-serving-cert". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:51:40 crc kubenswrapper[4926]: I1122 10:51:40.051834 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ae977eb4-8273-4dab-9e39-80c36ccd63e2-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "ae977eb4-8273-4dab-9e39-80c36ccd63e2" (UID: "ae977eb4-8273-4dab-9e39-80c36ccd63e2"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:51:40 crc kubenswrapper[4926]: I1122 10:51:40.056003 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ae977eb4-8273-4dab-9e39-80c36ccd63e2-console-oauth-config" (OuterVolumeSpecName: "console-oauth-config") pod "ae977eb4-8273-4dab-9e39-80c36ccd63e2" (UID: "ae977eb4-8273-4dab-9e39-80c36ccd63e2"). InnerVolumeSpecName "console-oauth-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:51:40 crc kubenswrapper[4926]: I1122 10:51:40.056971 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ae977eb4-8273-4dab-9e39-80c36ccd63e2-kube-api-access-jkjrs" (OuterVolumeSpecName: "kube-api-access-jkjrs") pod "ae977eb4-8273-4dab-9e39-80c36ccd63e2" (UID: "ae977eb4-8273-4dab-9e39-80c36ccd63e2"). InnerVolumeSpecName "kube-api-access-jkjrs". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:51:40 crc kubenswrapper[4926]: I1122 10:51:40.057724 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ae977eb4-8273-4dab-9e39-80c36ccd63e2-console-serving-cert" (OuterVolumeSpecName: "console-serving-cert") pod "ae977eb4-8273-4dab-9e39-80c36ccd63e2" (UID: "ae977eb4-8273-4dab-9e39-80c36ccd63e2"). InnerVolumeSpecName "console-serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:51:40 crc kubenswrapper[4926]: I1122 10:51:40.153573 4926 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/ae977eb4-8273-4dab-9e39-80c36ccd63e2-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 22 10:51:40 crc kubenswrapper[4926]: I1122 10:51:40.153654 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jkjrs\" (UniqueName: \"kubernetes.io/projected/ae977eb4-8273-4dab-9e39-80c36ccd63e2-kube-api-access-jkjrs\") on node \"crc\" DevicePath \"\"" Nov 22 10:51:40 crc kubenswrapper[4926]: I1122 10:51:40.153673 4926 reconciler_common.go:293] "Volume detached for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/ae977eb4-8273-4dab-9e39-80c36ccd63e2-console-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 22 10:51:40 crc kubenswrapper[4926]: I1122 10:51:40.153686 4926 reconciler_common.go:293] "Volume detached for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/ae977eb4-8273-4dab-9e39-80c36ccd63e2-console-oauth-config\") on node \"crc\" DevicePath \"\"" Nov 22 10:51:40 crc kubenswrapper[4926]: I1122 10:51:40.153698 4926 reconciler_common.go:293] "Volume detached for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/ae977eb4-8273-4dab-9e39-80c36ccd63e2-oauth-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 22 10:51:40 crc kubenswrapper[4926]: I1122 10:51:40.584796 4926 generic.go:334] "Generic (PLEG): container finished" podID="de687d1f-94d7-4503-9599-7a43bff94909" containerID="7621326c65ce294b8cbc240581c7a24c00a7532d5df129a76f8dbb79fa30b141" exitCode=0 Nov 22 10:51:40 crc kubenswrapper[4926]: I1122 10:51:40.588738 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6lv2rq" event={"ID":"de687d1f-94d7-4503-9599-7a43bff94909","Type":"ContainerDied","Data":"7621326c65ce294b8cbc240581c7a24c00a7532d5df129a76f8dbb79fa30b141"} Nov 22 10:51:40 crc kubenswrapper[4926]: I1122 10:51:40.590454 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-console_console-f9d7485db-nsj2w_ae977eb4-8273-4dab-9e39-80c36ccd63e2/console/0.log" Nov 22 10:51:40 crc kubenswrapper[4926]: I1122 10:51:40.591401 4926 generic.go:334] "Generic (PLEG): container finished" podID="ae977eb4-8273-4dab-9e39-80c36ccd63e2" containerID="cb7a55cbc2e4bc4cdf65ef747993bfb4d39e1d913f9c5d25263a88a4044fdfff" exitCode=2 Nov 22 10:51:40 crc kubenswrapper[4926]: I1122 10:51:40.591454 4926 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/console-f9d7485db-nsj2w" Nov 22 10:51:40 crc kubenswrapper[4926]: I1122 10:51:40.591457 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-nsj2w" event={"ID":"ae977eb4-8273-4dab-9e39-80c36ccd63e2","Type":"ContainerDied","Data":"cb7a55cbc2e4bc4cdf65ef747993bfb4d39e1d913f9c5d25263a88a4044fdfff"} Nov 22 10:51:40 crc kubenswrapper[4926]: I1122 10:51:40.591731 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-nsj2w" event={"ID":"ae977eb4-8273-4dab-9e39-80c36ccd63e2","Type":"ContainerDied","Data":"dfc574c382b4bbf023e7b4df1fa5c865f3cec91fbb1e5daf5222d7d7e9d1ba47"} Nov 22 10:51:40 crc kubenswrapper[4926]: I1122 10:51:40.591762 4926 scope.go:117] "RemoveContainer" containerID="cb7a55cbc2e4bc4cdf65ef747993bfb4d39e1d913f9c5d25263a88a4044fdfff" Nov 22 10:51:40 crc kubenswrapper[4926]: I1122 10:51:40.612213 4926 scope.go:117] "RemoveContainer" containerID="cb7a55cbc2e4bc4cdf65ef747993bfb4d39e1d913f9c5d25263a88a4044fdfff" Nov 22 10:51:40 crc kubenswrapper[4926]: E1122 10:51:40.612627 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"cb7a55cbc2e4bc4cdf65ef747993bfb4d39e1d913f9c5d25263a88a4044fdfff\": container with ID starting with cb7a55cbc2e4bc4cdf65ef747993bfb4d39e1d913f9c5d25263a88a4044fdfff not found: ID does not exist" containerID="cb7a55cbc2e4bc4cdf65ef747993bfb4d39e1d913f9c5d25263a88a4044fdfff" Nov 22 10:51:40 crc kubenswrapper[4926]: I1122 10:51:40.612658 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cb7a55cbc2e4bc4cdf65ef747993bfb4d39e1d913f9c5d25263a88a4044fdfff"} err="failed to get container status \"cb7a55cbc2e4bc4cdf65ef747993bfb4d39e1d913f9c5d25263a88a4044fdfff\": rpc error: code = NotFound desc = could not find container \"cb7a55cbc2e4bc4cdf65ef747993bfb4d39e1d913f9c5d25263a88a4044fdfff\": container with ID starting with cb7a55cbc2e4bc4cdf65ef747993bfb4d39e1d913f9c5d25263a88a4044fdfff not found: ID does not exist" Nov 22 10:51:40 crc kubenswrapper[4926]: I1122 10:51:40.695362 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-console/console-f9d7485db-nsj2w"] Nov 22 10:51:40 crc kubenswrapper[4926]: I1122 10:51:40.701525 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-console/console-f9d7485db-nsj2w"] Nov 22 10:51:41 crc kubenswrapper[4926]: I1122 10:51:41.603412 4926 generic.go:334] "Generic (PLEG): container finished" podID="de687d1f-94d7-4503-9599-7a43bff94909" containerID="c0667ac4782f5a445764a0461dc2131ba517132694f8a7de30050006f600a99e" exitCode=0 Nov 22 10:51:41 crc kubenswrapper[4926]: I1122 10:51:41.603466 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6lv2rq" event={"ID":"de687d1f-94d7-4503-9599-7a43bff94909","Type":"ContainerDied","Data":"c0667ac4782f5a445764a0461dc2131ba517132694f8a7de30050006f600a99e"} Nov 22 10:51:42 crc kubenswrapper[4926]: I1122 10:51:42.588461 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ae977eb4-8273-4dab-9e39-80c36ccd63e2" path="/var/lib/kubelet/pods/ae977eb4-8273-4dab-9e39-80c36ccd63e2/volumes" Nov 22 10:51:42 crc kubenswrapper[4926]: I1122 10:51:42.845768 4926 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6lv2rq" Nov 22 10:51:43 crc kubenswrapper[4926]: I1122 10:51:43.042000 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4p84m\" (UniqueName: \"kubernetes.io/projected/de687d1f-94d7-4503-9599-7a43bff94909-kube-api-access-4p84m\") pod \"de687d1f-94d7-4503-9599-7a43bff94909\" (UID: \"de687d1f-94d7-4503-9599-7a43bff94909\") " Nov 22 10:51:43 crc kubenswrapper[4926]: I1122 10:51:43.042465 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/de687d1f-94d7-4503-9599-7a43bff94909-util\") pod \"de687d1f-94d7-4503-9599-7a43bff94909\" (UID: \"de687d1f-94d7-4503-9599-7a43bff94909\") " Nov 22 10:51:43 crc kubenswrapper[4926]: I1122 10:51:43.042552 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/de687d1f-94d7-4503-9599-7a43bff94909-bundle\") pod \"de687d1f-94d7-4503-9599-7a43bff94909\" (UID: \"de687d1f-94d7-4503-9599-7a43bff94909\") " Nov 22 10:51:43 crc kubenswrapper[4926]: I1122 10:51:43.044473 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/de687d1f-94d7-4503-9599-7a43bff94909-bundle" (OuterVolumeSpecName: "bundle") pod "de687d1f-94d7-4503-9599-7a43bff94909" (UID: "de687d1f-94d7-4503-9599-7a43bff94909"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 10:51:43 crc kubenswrapper[4926]: I1122 10:51:43.047071 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/de687d1f-94d7-4503-9599-7a43bff94909-kube-api-access-4p84m" (OuterVolumeSpecName: "kube-api-access-4p84m") pod "de687d1f-94d7-4503-9599-7a43bff94909" (UID: "de687d1f-94d7-4503-9599-7a43bff94909"). InnerVolumeSpecName "kube-api-access-4p84m". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:51:43 crc kubenswrapper[4926]: I1122 10:51:43.134803 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/de687d1f-94d7-4503-9599-7a43bff94909-util" (OuterVolumeSpecName: "util") pod "de687d1f-94d7-4503-9599-7a43bff94909" (UID: "de687d1f-94d7-4503-9599-7a43bff94909"). InnerVolumeSpecName "util". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 10:51:43 crc kubenswrapper[4926]: I1122 10:51:43.144287 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4p84m\" (UniqueName: \"kubernetes.io/projected/de687d1f-94d7-4503-9599-7a43bff94909-kube-api-access-4p84m\") on node \"crc\" DevicePath \"\"" Nov 22 10:51:43 crc kubenswrapper[4926]: I1122 10:51:43.144348 4926 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/de687d1f-94d7-4503-9599-7a43bff94909-util\") on node \"crc\" DevicePath \"\"" Nov 22 10:51:43 crc kubenswrapper[4926]: I1122 10:51:43.144375 4926 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/de687d1f-94d7-4503-9599-7a43bff94909-bundle\") on node \"crc\" DevicePath \"\"" Nov 22 10:51:43 crc kubenswrapper[4926]: I1122 10:51:43.616833 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6lv2rq" event={"ID":"de687d1f-94d7-4503-9599-7a43bff94909","Type":"ContainerDied","Data":"5b2cc070de57fdc6fdf8adba0e70b8a6fb31d834d8d14d25d0842d314561cb62"} Nov 22 10:51:43 crc kubenswrapper[4926]: I1122 10:51:43.616914 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6lv2rq" Nov 22 10:51:43 crc kubenswrapper[4926]: I1122 10:51:43.616916 4926 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="5b2cc070de57fdc6fdf8adba0e70b8a6fb31d834d8d14d25d0842d314561cb62" Nov 22 10:51:51 crc kubenswrapper[4926]: I1122 10:51:51.917543 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/metallb-operator-controller-manager-7d858964b4-hd89m"] Nov 22 10:51:51 crc kubenswrapper[4926]: E1122 10:51:51.918206 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ae977eb4-8273-4dab-9e39-80c36ccd63e2" containerName="console" Nov 22 10:51:51 crc kubenswrapper[4926]: I1122 10:51:51.918219 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="ae977eb4-8273-4dab-9e39-80c36ccd63e2" containerName="console" Nov 22 10:51:51 crc kubenswrapper[4926]: E1122 10:51:51.918226 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="de687d1f-94d7-4503-9599-7a43bff94909" containerName="util" Nov 22 10:51:51 crc kubenswrapper[4926]: I1122 10:51:51.918232 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="de687d1f-94d7-4503-9599-7a43bff94909" containerName="util" Nov 22 10:51:51 crc kubenswrapper[4926]: E1122 10:51:51.918250 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="de687d1f-94d7-4503-9599-7a43bff94909" containerName="extract" Nov 22 10:51:51 crc kubenswrapper[4926]: I1122 10:51:51.918255 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="de687d1f-94d7-4503-9599-7a43bff94909" containerName="extract" Nov 22 10:51:51 crc kubenswrapper[4926]: E1122 10:51:51.918270 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="de687d1f-94d7-4503-9599-7a43bff94909" containerName="pull" Nov 22 10:51:51 crc kubenswrapper[4926]: I1122 10:51:51.918275 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="de687d1f-94d7-4503-9599-7a43bff94909" containerName="pull" Nov 22 10:51:51 crc kubenswrapper[4926]: I1122 10:51:51.918364 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="ae977eb4-8273-4dab-9e39-80c36ccd63e2" containerName="console" Nov 
22 10:51:51 crc kubenswrapper[4926]: I1122 10:51:51.918381 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="de687d1f-94d7-4503-9599-7a43bff94909" containerName="extract" Nov 22 10:51:51 crc kubenswrapper[4926]: I1122 10:51:51.918759 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/metallb-operator-controller-manager-7d858964b4-hd89m" Nov 22 10:51:51 crc kubenswrapper[4926]: I1122 10:51:51.921503 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"openshift-service-ca.crt" Nov 22 10:51:51 crc kubenswrapper[4926]: I1122 10:51:51.921512 4926 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-webhook-server-cert" Nov 22 10:51:51 crc kubenswrapper[4926]: I1122 10:51:51.921683 4926 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-controller-manager-service-cert" Nov 22 10:51:51 crc kubenswrapper[4926]: I1122 10:51:51.921850 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"kube-root-ca.crt" Nov 22 10:51:51 crc kubenswrapper[4926]: I1122 10:51:51.922732 4926 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"manager-account-dockercfg-j8f6f" Nov 22 10:51:51 crc kubenswrapper[4926]: I1122 10:51:51.939763 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-controller-manager-7d858964b4-hd89m"] Nov 22 10:51:52 crc kubenswrapper[4926]: I1122 10:51:52.058627 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/afb6b154-40e5-4285-9f49-38053bdbb6c4-webhook-cert\") pod \"metallb-operator-controller-manager-7d858964b4-hd89m\" (UID: \"afb6b154-40e5-4285-9f49-38053bdbb6c4\") " pod="metallb-system/metallb-operator-controller-manager-7d858964b4-hd89m" Nov 22 10:51:52 crc kubenswrapper[4926]: I1122 10:51:52.058711 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dfwdd\" (UniqueName: \"kubernetes.io/projected/afb6b154-40e5-4285-9f49-38053bdbb6c4-kube-api-access-dfwdd\") pod \"metallb-operator-controller-manager-7d858964b4-hd89m\" (UID: \"afb6b154-40e5-4285-9f49-38053bdbb6c4\") " pod="metallb-system/metallb-operator-controller-manager-7d858964b4-hd89m" Nov 22 10:51:52 crc kubenswrapper[4926]: I1122 10:51:52.058823 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/afb6b154-40e5-4285-9f49-38053bdbb6c4-apiservice-cert\") pod \"metallb-operator-controller-manager-7d858964b4-hd89m\" (UID: \"afb6b154-40e5-4285-9f49-38053bdbb6c4\") " pod="metallb-system/metallb-operator-controller-manager-7d858964b4-hd89m" Nov 22 10:51:52 crc kubenswrapper[4926]: I1122 10:51:52.134193 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/metallb-operator-webhook-server-676845568d-nb86k"] Nov 22 10:51:52 crc kubenswrapper[4926]: I1122 10:51:52.134988 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/metallb-operator-webhook-server-676845568d-nb86k" Nov 22 10:51:52 crc kubenswrapper[4926]: I1122 10:51:52.136621 4926 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-webhook-server-service-cert" Nov 22 10:51:52 crc kubenswrapper[4926]: I1122 10:51:52.136847 4926 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-webhook-cert" Nov 22 10:51:52 crc kubenswrapper[4926]: I1122 10:51:52.136880 4926 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"controller-dockercfg-lx8kv" Nov 22 10:51:52 crc kubenswrapper[4926]: I1122 10:51:52.156876 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-webhook-server-676845568d-nb86k"] Nov 22 10:51:52 crc kubenswrapper[4926]: I1122 10:51:52.160829 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/afb6b154-40e5-4285-9f49-38053bdbb6c4-apiservice-cert\") pod \"metallb-operator-controller-manager-7d858964b4-hd89m\" (UID: \"afb6b154-40e5-4285-9f49-38053bdbb6c4\") " pod="metallb-system/metallb-operator-controller-manager-7d858964b4-hd89m" Nov 22 10:51:52 crc kubenswrapper[4926]: I1122 10:51:52.160877 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pnmgt\" (UniqueName: \"kubernetes.io/projected/63e553c4-290f-4b65-a563-b57f0577c982-kube-api-access-pnmgt\") pod \"metallb-operator-webhook-server-676845568d-nb86k\" (UID: \"63e553c4-290f-4b65-a563-b57f0577c982\") " pod="metallb-system/metallb-operator-webhook-server-676845568d-nb86k" Nov 22 10:51:52 crc kubenswrapper[4926]: I1122 10:51:52.160927 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/63e553c4-290f-4b65-a563-b57f0577c982-apiservice-cert\") pod \"metallb-operator-webhook-server-676845568d-nb86k\" (UID: \"63e553c4-290f-4b65-a563-b57f0577c982\") " pod="metallb-system/metallb-operator-webhook-server-676845568d-nb86k" Nov 22 10:51:52 crc kubenswrapper[4926]: I1122 10:51:52.160944 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/afb6b154-40e5-4285-9f49-38053bdbb6c4-webhook-cert\") pod \"metallb-operator-controller-manager-7d858964b4-hd89m\" (UID: \"afb6b154-40e5-4285-9f49-38053bdbb6c4\") " pod="metallb-system/metallb-operator-controller-manager-7d858964b4-hd89m" Nov 22 10:51:52 crc kubenswrapper[4926]: I1122 10:51:52.160977 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dfwdd\" (UniqueName: \"kubernetes.io/projected/afb6b154-40e5-4285-9f49-38053bdbb6c4-kube-api-access-dfwdd\") pod \"metallb-operator-controller-manager-7d858964b4-hd89m\" (UID: \"afb6b154-40e5-4285-9f49-38053bdbb6c4\") " pod="metallb-system/metallb-operator-controller-manager-7d858964b4-hd89m" Nov 22 10:51:52 crc kubenswrapper[4926]: I1122 10:51:52.161160 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/63e553c4-290f-4b65-a563-b57f0577c982-webhook-cert\") pod \"metallb-operator-webhook-server-676845568d-nb86k\" (UID: \"63e553c4-290f-4b65-a563-b57f0577c982\") " pod="metallb-system/metallb-operator-webhook-server-676845568d-nb86k" Nov 22 10:51:52 crc 
kubenswrapper[4926]: I1122 10:51:52.167520 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/afb6b154-40e5-4285-9f49-38053bdbb6c4-apiservice-cert\") pod \"metallb-operator-controller-manager-7d858964b4-hd89m\" (UID: \"afb6b154-40e5-4285-9f49-38053bdbb6c4\") " pod="metallb-system/metallb-operator-controller-manager-7d858964b4-hd89m" Nov 22 10:51:52 crc kubenswrapper[4926]: I1122 10:51:52.183600 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/afb6b154-40e5-4285-9f49-38053bdbb6c4-webhook-cert\") pod \"metallb-operator-controller-manager-7d858964b4-hd89m\" (UID: \"afb6b154-40e5-4285-9f49-38053bdbb6c4\") " pod="metallb-system/metallb-operator-controller-manager-7d858964b4-hd89m" Nov 22 10:51:52 crc kubenswrapper[4926]: I1122 10:51:52.189790 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dfwdd\" (UniqueName: \"kubernetes.io/projected/afb6b154-40e5-4285-9f49-38053bdbb6c4-kube-api-access-dfwdd\") pod \"metallb-operator-controller-manager-7d858964b4-hd89m\" (UID: \"afb6b154-40e5-4285-9f49-38053bdbb6c4\") " pod="metallb-system/metallb-operator-controller-manager-7d858964b4-hd89m" Nov 22 10:51:52 crc kubenswrapper[4926]: I1122 10:51:52.238117 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/metallb-operator-controller-manager-7d858964b4-hd89m" Nov 22 10:51:52 crc kubenswrapper[4926]: I1122 10:51:52.261944 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pnmgt\" (UniqueName: \"kubernetes.io/projected/63e553c4-290f-4b65-a563-b57f0577c982-kube-api-access-pnmgt\") pod \"metallb-operator-webhook-server-676845568d-nb86k\" (UID: \"63e553c4-290f-4b65-a563-b57f0577c982\") " pod="metallb-system/metallb-operator-webhook-server-676845568d-nb86k" Nov 22 10:51:52 crc kubenswrapper[4926]: I1122 10:51:52.262000 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/63e553c4-290f-4b65-a563-b57f0577c982-apiservice-cert\") pod \"metallb-operator-webhook-server-676845568d-nb86k\" (UID: \"63e553c4-290f-4b65-a563-b57f0577c982\") " pod="metallb-system/metallb-operator-webhook-server-676845568d-nb86k" Nov 22 10:51:52 crc kubenswrapper[4926]: I1122 10:51:52.262083 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/63e553c4-290f-4b65-a563-b57f0577c982-webhook-cert\") pod \"metallb-operator-webhook-server-676845568d-nb86k\" (UID: \"63e553c4-290f-4b65-a563-b57f0577c982\") " pod="metallb-system/metallb-operator-webhook-server-676845568d-nb86k" Nov 22 10:51:52 crc kubenswrapper[4926]: I1122 10:51:52.266630 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/63e553c4-290f-4b65-a563-b57f0577c982-webhook-cert\") pod \"metallb-operator-webhook-server-676845568d-nb86k\" (UID: \"63e553c4-290f-4b65-a563-b57f0577c982\") " pod="metallb-system/metallb-operator-webhook-server-676845568d-nb86k" Nov 22 10:51:52 crc kubenswrapper[4926]: I1122 10:51:52.266981 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/63e553c4-290f-4b65-a563-b57f0577c982-apiservice-cert\") pod \"metallb-operator-webhook-server-676845568d-nb86k\" (UID: 
\"63e553c4-290f-4b65-a563-b57f0577c982\") " pod="metallb-system/metallb-operator-webhook-server-676845568d-nb86k" Nov 22 10:51:52 crc kubenswrapper[4926]: I1122 10:51:52.279083 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pnmgt\" (UniqueName: \"kubernetes.io/projected/63e553c4-290f-4b65-a563-b57f0577c982-kube-api-access-pnmgt\") pod \"metallb-operator-webhook-server-676845568d-nb86k\" (UID: \"63e553c4-290f-4b65-a563-b57f0577c982\") " pod="metallb-system/metallb-operator-webhook-server-676845568d-nb86k" Nov 22 10:51:52 crc kubenswrapper[4926]: I1122 10:51:52.449326 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/metallb-operator-webhook-server-676845568d-nb86k" Nov 22 10:51:52 crc kubenswrapper[4926]: I1122 10:51:52.562368 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-controller-manager-7d858964b4-hd89m"] Nov 22 10:51:52 crc kubenswrapper[4926]: I1122 10:51:52.681250 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-controller-manager-7d858964b4-hd89m" event={"ID":"afb6b154-40e5-4285-9f49-38053bdbb6c4","Type":"ContainerStarted","Data":"cf17b6af4794b9115dd1a580255eb2dc9d8126f5b8b5b27a3db5893f51187795"} Nov 22 10:51:52 crc kubenswrapper[4926]: I1122 10:51:52.701297 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-webhook-server-676845568d-nb86k"] Nov 22 10:51:52 crc kubenswrapper[4926]: W1122 10:51:52.711494 4926 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod63e553c4_290f_4b65_a563_b57f0577c982.slice/crio-5ac27c779e3495fa318b3096c8d3f1717ab46a94bdab08e05fee61122186bc6d WatchSource:0}: Error finding container 5ac27c779e3495fa318b3096c8d3f1717ab46a94bdab08e05fee61122186bc6d: Status 404 returned error can't find the container with id 5ac27c779e3495fa318b3096c8d3f1717ab46a94bdab08e05fee61122186bc6d Nov 22 10:51:53 crc kubenswrapper[4926]: I1122 10:51:53.688515 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-webhook-server-676845568d-nb86k" event={"ID":"63e553c4-290f-4b65-a563-b57f0577c982","Type":"ContainerStarted","Data":"5ac27c779e3495fa318b3096c8d3f1717ab46a94bdab08e05fee61122186bc6d"} Nov 22 10:51:57 crc kubenswrapper[4926]: I1122 10:51:57.713529 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-controller-manager-7d858964b4-hd89m" event={"ID":"afb6b154-40e5-4285-9f49-38053bdbb6c4","Type":"ContainerStarted","Data":"47997bba2c357af7a2e759ae5660c60fffe2bc75bcf8f7e91cff19afc72cfd63"} Nov 22 10:51:57 crc kubenswrapper[4926]: I1122 10:51:57.714089 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/metallb-operator-controller-manager-7d858964b4-hd89m" Nov 22 10:51:57 crc kubenswrapper[4926]: I1122 10:51:57.715313 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-webhook-server-676845568d-nb86k" event={"ID":"63e553c4-290f-4b65-a563-b57f0577c982","Type":"ContainerStarted","Data":"e8477526d9d42317d1f1fe33406765a90d491a81e583c6c08d22f950672d305d"} Nov 22 10:51:57 crc kubenswrapper[4926]: I1122 10:51:57.715974 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/metallb-operator-webhook-server-676845568d-nb86k" Nov 22 10:51:57 crc kubenswrapper[4926]: I1122 10:51:57.736990 4926 
pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/metallb-operator-controller-manager-7d858964b4-hd89m" podStartSLOduration=2.5924870479999997 podStartE2EDuration="6.736969904s" podCreationTimestamp="2025-11-22 10:51:51 +0000 UTC" firstStartedPulling="2025-11-22 10:51:52.593704533 +0000 UTC m=+732.895309820" lastFinishedPulling="2025-11-22 10:51:56.738187379 +0000 UTC m=+737.039792676" observedRunningTime="2025-11-22 10:51:57.731678844 +0000 UTC m=+738.033284151" watchObservedRunningTime="2025-11-22 10:51:57.736969904 +0000 UTC m=+738.038575201" Nov 22 10:51:57 crc kubenswrapper[4926]: I1122 10:51:57.750241 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/metallb-operator-webhook-server-676845568d-nb86k" podStartSLOduration=1.7106393739999999 podStartE2EDuration="5.750190381s" podCreationTimestamp="2025-11-22 10:51:52 +0000 UTC" firstStartedPulling="2025-11-22 10:51:52.713506365 +0000 UTC m=+733.015111652" lastFinishedPulling="2025-11-22 10:51:56.753057382 +0000 UTC m=+737.054662659" observedRunningTime="2025-11-22 10:51:57.750199821 +0000 UTC m=+738.051805128" watchObservedRunningTime="2025-11-22 10:51:57.750190381 +0000 UTC m=+738.051795668" Nov 22 10:52:09 crc kubenswrapper[4926]: I1122 10:52:09.661521 4926 patch_prober.go:28] interesting pod/machine-config-daemon-xr9nd container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 22 10:52:09 crc kubenswrapper[4926]: I1122 10:52:09.663306 4926 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" podUID="d4977b14-85c3-4141-9b15-1768f09e8d27" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 22 10:52:10 crc kubenswrapper[4926]: I1122 10:52:10.652656 4926 pod_container_manager_linux.go:210] "Failed to delete cgroup paths" cgroupName=["kubepods","burstable","podae977eb4-8273-4dab-9e39-80c36ccd63e2"] err="unable to destroy cgroup paths for cgroup [kubepods burstable podae977eb4-8273-4dab-9e39-80c36ccd63e2] : Timed out while waiting for systemd to remove kubepods-burstable-podae977eb4_8273_4dab_9e39_80c36ccd63e2.slice" Nov 22 10:52:11 crc kubenswrapper[4926]: I1122 10:52:11.494940 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-h2qnt"] Nov 22 10:52:11 crc kubenswrapper[4926]: I1122 10:52:11.495475 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-controller-manager/controller-manager-879f6c89f-h2qnt" podUID="aadd3c70-0dfa-42cb-879d-026a0ed055ba" containerName="controller-manager" containerID="cri-o://3aa59bd6f40427a36bfdbe2fbf567da7b97b34306e0ceb3d7dd972d2c071941a" gracePeriod=30 Nov 22 10:52:11 crc kubenswrapper[4926]: I1122 10:52:11.521689 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-pdkqk"] Nov 22 10:52:11 crc kubenswrapper[4926]: I1122 10:52:11.521950 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-pdkqk" podUID="ce9a30da-be0f-49f1-8e0b-40bb1fba706a" containerName="route-controller-manager" 
containerID="cri-o://2477155da5ed7e1348298f164923a548f3d685094db32f08c99b087ae44c14d2" gracePeriod=30 Nov 22 10:52:11 crc kubenswrapper[4926]: I1122 10:52:11.809842 4926 generic.go:334] "Generic (PLEG): container finished" podID="ce9a30da-be0f-49f1-8e0b-40bb1fba706a" containerID="2477155da5ed7e1348298f164923a548f3d685094db32f08c99b087ae44c14d2" exitCode=0 Nov 22 10:52:11 crc kubenswrapper[4926]: I1122 10:52:11.809951 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-pdkqk" event={"ID":"ce9a30da-be0f-49f1-8e0b-40bb1fba706a","Type":"ContainerDied","Data":"2477155da5ed7e1348298f164923a548f3d685094db32f08c99b087ae44c14d2"} Nov 22 10:52:11 crc kubenswrapper[4926]: I1122 10:52:11.811680 4926 generic.go:334] "Generic (PLEG): container finished" podID="aadd3c70-0dfa-42cb-879d-026a0ed055ba" containerID="3aa59bd6f40427a36bfdbe2fbf567da7b97b34306e0ceb3d7dd972d2c071941a" exitCode=0 Nov 22 10:52:11 crc kubenswrapper[4926]: I1122 10:52:11.811704 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-h2qnt" event={"ID":"aadd3c70-0dfa-42cb-879d-026a0ed055ba","Type":"ContainerDied","Data":"3aa59bd6f40427a36bfdbe2fbf567da7b97b34306e0ceb3d7dd972d2c071941a"} Nov 22 10:52:12 crc kubenswrapper[4926]: I1122 10:52:12.453971 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/metallb-operator-webhook-server-676845568d-nb86k" Nov 22 10:52:13 crc kubenswrapper[4926]: I1122 10:52:13.145171 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-pdkqk" Nov 22 10:52:13 crc kubenswrapper[4926]: I1122 10:52:13.149158 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-h2qnt" Nov 22 10:52:13 crc kubenswrapper[4926]: I1122 10:52:13.177080 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-8464c679-tcvgx"] Nov 22 10:52:13 crc kubenswrapper[4926]: E1122 10:52:13.177322 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="aadd3c70-0dfa-42cb-879d-026a0ed055ba" containerName="controller-manager" Nov 22 10:52:13 crc kubenswrapper[4926]: I1122 10:52:13.177337 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="aadd3c70-0dfa-42cb-879d-026a0ed055ba" containerName="controller-manager" Nov 22 10:52:13 crc kubenswrapper[4926]: E1122 10:52:13.177352 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ce9a30da-be0f-49f1-8e0b-40bb1fba706a" containerName="route-controller-manager" Nov 22 10:52:13 crc kubenswrapper[4926]: I1122 10:52:13.177361 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="ce9a30da-be0f-49f1-8e0b-40bb1fba706a" containerName="route-controller-manager" Nov 22 10:52:13 crc kubenswrapper[4926]: I1122 10:52:13.177463 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="aadd3c70-0dfa-42cb-879d-026a0ed055ba" containerName="controller-manager" Nov 22 10:52:13 crc kubenswrapper[4926]: I1122 10:52:13.177475 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="ce9a30da-be0f-49f1-8e0b-40bb1fba706a" containerName="route-controller-manager" Nov 22 10:52:13 crc kubenswrapper[4926]: I1122 10:52:13.177925 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-8464c679-tcvgx" Nov 22 10:52:13 crc kubenswrapper[4926]: I1122 10:52:13.223482 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-8464c679-tcvgx"] Nov 22 10:52:13 crc kubenswrapper[4926]: I1122 10:52:13.335081 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/aadd3c70-0dfa-42cb-879d-026a0ed055ba-proxy-ca-bundles\") pod \"aadd3c70-0dfa-42cb-879d-026a0ed055ba\" (UID: \"aadd3c70-0dfa-42cb-879d-026a0ed055ba\") " Nov 22 10:52:13 crc kubenswrapper[4926]: I1122 10:52:13.335135 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ch7n5\" (UniqueName: \"kubernetes.io/projected/ce9a30da-be0f-49f1-8e0b-40bb1fba706a-kube-api-access-ch7n5\") pod \"ce9a30da-be0f-49f1-8e0b-40bb1fba706a\" (UID: \"ce9a30da-be0f-49f1-8e0b-40bb1fba706a\") " Nov 22 10:52:13 crc kubenswrapper[4926]: I1122 10:52:13.335169 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/aadd3c70-0dfa-42cb-879d-026a0ed055ba-serving-cert\") pod \"aadd3c70-0dfa-42cb-879d-026a0ed055ba\" (UID: \"aadd3c70-0dfa-42cb-879d-026a0ed055ba\") " Nov 22 10:52:13 crc kubenswrapper[4926]: I1122 10:52:13.335188 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/ce9a30da-be0f-49f1-8e0b-40bb1fba706a-client-ca\") pod \"ce9a30da-be0f-49f1-8e0b-40bb1fba706a\" (UID: \"ce9a30da-be0f-49f1-8e0b-40bb1fba706a\") " Nov 22 10:52:13 crc kubenswrapper[4926]: I1122 10:52:13.335228 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/aadd3c70-0dfa-42cb-879d-026a0ed055ba-config\") pod \"aadd3c70-0dfa-42cb-879d-026a0ed055ba\" (UID: \"aadd3c70-0dfa-42cb-879d-026a0ed055ba\") " Nov 22 10:52:13 crc kubenswrapper[4926]: I1122 10:52:13.335258 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-smgdm\" (UniqueName: \"kubernetes.io/projected/aadd3c70-0dfa-42cb-879d-026a0ed055ba-kube-api-access-smgdm\") pod \"aadd3c70-0dfa-42cb-879d-026a0ed055ba\" (UID: \"aadd3c70-0dfa-42cb-879d-026a0ed055ba\") " Nov 22 10:52:13 crc kubenswrapper[4926]: I1122 10:52:13.335292 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ce9a30da-be0f-49f1-8e0b-40bb1fba706a-serving-cert\") pod \"ce9a30da-be0f-49f1-8e0b-40bb1fba706a\" (UID: \"ce9a30da-be0f-49f1-8e0b-40bb1fba706a\") " Nov 22 10:52:13 crc kubenswrapper[4926]: I1122 10:52:13.335336 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/aadd3c70-0dfa-42cb-879d-026a0ed055ba-client-ca\") pod \"aadd3c70-0dfa-42cb-879d-026a0ed055ba\" (UID: \"aadd3c70-0dfa-42cb-879d-026a0ed055ba\") " Nov 22 10:52:13 crc kubenswrapper[4926]: I1122 10:52:13.335351 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ce9a30da-be0f-49f1-8e0b-40bb1fba706a-config\") pod \"ce9a30da-be0f-49f1-8e0b-40bb1fba706a\" (UID: \"ce9a30da-be0f-49f1-8e0b-40bb1fba706a\") " Nov 22 10:52:13 crc kubenswrapper[4926]: I1122 10:52:13.335514 4926 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/359cb12f-f999-4689-bd14-6c79b247b38c-client-ca\") pod \"route-controller-manager-8464c679-tcvgx\" (UID: \"359cb12f-f999-4689-bd14-6c79b247b38c\") " pod="openshift-route-controller-manager/route-controller-manager-8464c679-tcvgx" Nov 22 10:52:13 crc kubenswrapper[4926]: I1122 10:52:13.335556 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/359cb12f-f999-4689-bd14-6c79b247b38c-config\") pod \"route-controller-manager-8464c679-tcvgx\" (UID: \"359cb12f-f999-4689-bd14-6c79b247b38c\") " pod="openshift-route-controller-manager/route-controller-manager-8464c679-tcvgx" Nov 22 10:52:13 crc kubenswrapper[4926]: I1122 10:52:13.335578 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dh66b\" (UniqueName: \"kubernetes.io/projected/359cb12f-f999-4689-bd14-6c79b247b38c-kube-api-access-dh66b\") pod \"route-controller-manager-8464c679-tcvgx\" (UID: \"359cb12f-f999-4689-bd14-6c79b247b38c\") " pod="openshift-route-controller-manager/route-controller-manager-8464c679-tcvgx" Nov 22 10:52:13 crc kubenswrapper[4926]: I1122 10:52:13.335593 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/359cb12f-f999-4689-bd14-6c79b247b38c-serving-cert\") pod \"route-controller-manager-8464c679-tcvgx\" (UID: \"359cb12f-f999-4689-bd14-6c79b247b38c\") " pod="openshift-route-controller-manager/route-controller-manager-8464c679-tcvgx" Nov 22 10:52:13 crc kubenswrapper[4926]: I1122 10:52:13.336346 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/aadd3c70-0dfa-42cb-879d-026a0ed055ba-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "aadd3c70-0dfa-42cb-879d-026a0ed055ba" (UID: "aadd3c70-0dfa-42cb-879d-026a0ed055ba"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:52:13 crc kubenswrapper[4926]: I1122 10:52:13.336349 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ce9a30da-be0f-49f1-8e0b-40bb1fba706a-client-ca" (OuterVolumeSpecName: "client-ca") pod "ce9a30da-be0f-49f1-8e0b-40bb1fba706a" (UID: "ce9a30da-be0f-49f1-8e0b-40bb1fba706a"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:52:13 crc kubenswrapper[4926]: I1122 10:52:13.336407 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ce9a30da-be0f-49f1-8e0b-40bb1fba706a-config" (OuterVolumeSpecName: "config") pod "ce9a30da-be0f-49f1-8e0b-40bb1fba706a" (UID: "ce9a30da-be0f-49f1-8e0b-40bb1fba706a"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:52:13 crc kubenswrapper[4926]: I1122 10:52:13.336515 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/aadd3c70-0dfa-42cb-879d-026a0ed055ba-client-ca" (OuterVolumeSpecName: "client-ca") pod "aadd3c70-0dfa-42cb-879d-026a0ed055ba" (UID: "aadd3c70-0dfa-42cb-879d-026a0ed055ba"). InnerVolumeSpecName "client-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:52:13 crc kubenswrapper[4926]: I1122 10:52:13.336790 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/aadd3c70-0dfa-42cb-879d-026a0ed055ba-config" (OuterVolumeSpecName: "config") pod "aadd3c70-0dfa-42cb-879d-026a0ed055ba" (UID: "aadd3c70-0dfa-42cb-879d-026a0ed055ba"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:52:13 crc kubenswrapper[4926]: I1122 10:52:13.342587 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/aadd3c70-0dfa-42cb-879d-026a0ed055ba-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "aadd3c70-0dfa-42cb-879d-026a0ed055ba" (UID: "aadd3c70-0dfa-42cb-879d-026a0ed055ba"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:52:13 crc kubenswrapper[4926]: I1122 10:52:13.347642 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/aadd3c70-0dfa-42cb-879d-026a0ed055ba-kube-api-access-smgdm" (OuterVolumeSpecName: "kube-api-access-smgdm") pod "aadd3c70-0dfa-42cb-879d-026a0ed055ba" (UID: "aadd3c70-0dfa-42cb-879d-026a0ed055ba"). InnerVolumeSpecName "kube-api-access-smgdm". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:52:13 crc kubenswrapper[4926]: I1122 10:52:13.349406 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ce9a30da-be0f-49f1-8e0b-40bb1fba706a-kube-api-access-ch7n5" (OuterVolumeSpecName: "kube-api-access-ch7n5") pod "ce9a30da-be0f-49f1-8e0b-40bb1fba706a" (UID: "ce9a30da-be0f-49f1-8e0b-40bb1fba706a"). InnerVolumeSpecName "kube-api-access-ch7n5". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:52:13 crc kubenswrapper[4926]: I1122 10:52:13.357296 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ce9a30da-be0f-49f1-8e0b-40bb1fba706a-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "ce9a30da-be0f-49f1-8e0b-40bb1fba706a" (UID: "ce9a30da-be0f-49f1-8e0b-40bb1fba706a"). InnerVolumeSpecName "serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:52:13 crc kubenswrapper[4926]: I1122 10:52:13.436881 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dh66b\" (UniqueName: \"kubernetes.io/projected/359cb12f-f999-4689-bd14-6c79b247b38c-kube-api-access-dh66b\") pod \"route-controller-manager-8464c679-tcvgx\" (UID: \"359cb12f-f999-4689-bd14-6c79b247b38c\") " pod="openshift-route-controller-manager/route-controller-manager-8464c679-tcvgx" Nov 22 10:52:13 crc kubenswrapper[4926]: I1122 10:52:13.437190 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/359cb12f-f999-4689-bd14-6c79b247b38c-serving-cert\") pod \"route-controller-manager-8464c679-tcvgx\" (UID: \"359cb12f-f999-4689-bd14-6c79b247b38c\") " pod="openshift-route-controller-manager/route-controller-manager-8464c679-tcvgx" Nov 22 10:52:13 crc kubenswrapper[4926]: I1122 10:52:13.437273 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/359cb12f-f999-4689-bd14-6c79b247b38c-client-ca\") pod \"route-controller-manager-8464c679-tcvgx\" (UID: \"359cb12f-f999-4689-bd14-6c79b247b38c\") " pod="openshift-route-controller-manager/route-controller-manager-8464c679-tcvgx" Nov 22 10:52:13 crc kubenswrapper[4926]: I1122 10:52:13.438341 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/359cb12f-f999-4689-bd14-6c79b247b38c-client-ca\") pod \"route-controller-manager-8464c679-tcvgx\" (UID: \"359cb12f-f999-4689-bd14-6c79b247b38c\") " pod="openshift-route-controller-manager/route-controller-manager-8464c679-tcvgx" Nov 22 10:52:13 crc kubenswrapper[4926]: I1122 10:52:13.438703 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/359cb12f-f999-4689-bd14-6c79b247b38c-config\") pod \"route-controller-manager-8464c679-tcvgx\" (UID: \"359cb12f-f999-4689-bd14-6c79b247b38c\") " pod="openshift-route-controller-manager/route-controller-manager-8464c679-tcvgx" Nov 22 10:52:13 crc kubenswrapper[4926]: I1122 10:52:13.438778 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/359cb12f-f999-4689-bd14-6c79b247b38c-config\") pod \"route-controller-manager-8464c679-tcvgx\" (UID: \"359cb12f-f999-4689-bd14-6c79b247b38c\") " pod="openshift-route-controller-manager/route-controller-manager-8464c679-tcvgx" Nov 22 10:52:13 crc kubenswrapper[4926]: I1122 10:52:13.438859 4926 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/aadd3c70-0dfa-42cb-879d-026a0ed055ba-config\") on node \"crc\" DevicePath \"\"" Nov 22 10:52:13 crc kubenswrapper[4926]: I1122 10:52:13.438879 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-smgdm\" (UniqueName: \"kubernetes.io/projected/aadd3c70-0dfa-42cb-879d-026a0ed055ba-kube-api-access-smgdm\") on node \"crc\" DevicePath \"\"" Nov 22 10:52:13 crc kubenswrapper[4926]: I1122 10:52:13.438910 4926 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ce9a30da-be0f-49f1-8e0b-40bb1fba706a-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 22 10:52:13 crc kubenswrapper[4926]: I1122 10:52:13.438922 4926 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: 
\"kubernetes.io/configmap/aadd3c70-0dfa-42cb-879d-026a0ed055ba-client-ca\") on node \"crc\" DevicePath \"\"" Nov 22 10:52:13 crc kubenswrapper[4926]: I1122 10:52:13.438933 4926 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ce9a30da-be0f-49f1-8e0b-40bb1fba706a-config\") on node \"crc\" DevicePath \"\"" Nov 22 10:52:13 crc kubenswrapper[4926]: I1122 10:52:13.438944 4926 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/aadd3c70-0dfa-42cb-879d-026a0ed055ba-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Nov 22 10:52:13 crc kubenswrapper[4926]: I1122 10:52:13.438955 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ch7n5\" (UniqueName: \"kubernetes.io/projected/ce9a30da-be0f-49f1-8e0b-40bb1fba706a-kube-api-access-ch7n5\") on node \"crc\" DevicePath \"\"" Nov 22 10:52:13 crc kubenswrapper[4926]: I1122 10:52:13.438965 4926 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/aadd3c70-0dfa-42cb-879d-026a0ed055ba-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 22 10:52:13 crc kubenswrapper[4926]: I1122 10:52:13.438975 4926 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/ce9a30da-be0f-49f1-8e0b-40bb1fba706a-client-ca\") on node \"crc\" DevicePath \"\"" Nov 22 10:52:13 crc kubenswrapper[4926]: I1122 10:52:13.441301 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/359cb12f-f999-4689-bd14-6c79b247b38c-serving-cert\") pod \"route-controller-manager-8464c679-tcvgx\" (UID: \"359cb12f-f999-4689-bd14-6c79b247b38c\") " pod="openshift-route-controller-manager/route-controller-manager-8464c679-tcvgx" Nov 22 10:52:13 crc kubenswrapper[4926]: I1122 10:52:13.457804 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dh66b\" (UniqueName: \"kubernetes.io/projected/359cb12f-f999-4689-bd14-6c79b247b38c-kube-api-access-dh66b\") pod \"route-controller-manager-8464c679-tcvgx\" (UID: \"359cb12f-f999-4689-bd14-6c79b247b38c\") " pod="openshift-route-controller-manager/route-controller-manager-8464c679-tcvgx" Nov 22 10:52:13 crc kubenswrapper[4926]: I1122 10:52:13.501094 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-8464c679-tcvgx" Nov 22 10:52:13 crc kubenswrapper[4926]: I1122 10:52:13.715680 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-8464c679-tcvgx"] Nov 22 10:52:13 crc kubenswrapper[4926]: W1122 10:52:13.720259 4926 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod359cb12f_f999_4689_bd14_6c79b247b38c.slice/crio-91258a1cca485ebb8d190265e76138ec9cca7c194165a4eb8e65ef890ccb9d57 WatchSource:0}: Error finding container 91258a1cca485ebb8d190265e76138ec9cca7c194165a4eb8e65ef890ccb9d57: Status 404 returned error can't find the container with id 91258a1cca485ebb8d190265e76138ec9cca7c194165a4eb8e65ef890ccb9d57 Nov 22 10:52:13 crc kubenswrapper[4926]: I1122 10:52:13.829403 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-pdkqk" event={"ID":"ce9a30da-be0f-49f1-8e0b-40bb1fba706a","Type":"ContainerDied","Data":"e6815e2ad1bfc8fb43500737563811d011fdad0525f7984c290f7fabfd20f42e"} Nov 22 10:52:13 crc kubenswrapper[4926]: I1122 10:52:13.829438 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-pdkqk" Nov 22 10:52:13 crc kubenswrapper[4926]: I1122 10:52:13.829455 4926 scope.go:117] "RemoveContainer" containerID="2477155da5ed7e1348298f164923a548f3d685094db32f08c99b087ae44c14d2" Nov 22 10:52:13 crc kubenswrapper[4926]: I1122 10:52:13.831147 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-h2qnt" event={"ID":"aadd3c70-0dfa-42cb-879d-026a0ed055ba","Type":"ContainerDied","Data":"fb2cca53a8dbf1a269fbc1f2306cfebb7f39b7fcaa396a524113c170a45d6caa"} Nov 22 10:52:13 crc kubenswrapper[4926]: I1122 10:52:13.831214 4926 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-h2qnt" Nov 22 10:52:13 crc kubenswrapper[4926]: I1122 10:52:13.833109 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-8464c679-tcvgx" event={"ID":"359cb12f-f999-4689-bd14-6c79b247b38c","Type":"ContainerStarted","Data":"91258a1cca485ebb8d190265e76138ec9cca7c194165a4eb8e65ef890ccb9d57"} Nov 22 10:52:13 crc kubenswrapper[4926]: I1122 10:52:13.847740 4926 scope.go:117] "RemoveContainer" containerID="3aa59bd6f40427a36bfdbe2fbf567da7b97b34306e0ceb3d7dd972d2c071941a" Nov 22 10:52:13 crc kubenswrapper[4926]: I1122 10:52:13.872453 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-h2qnt"] Nov 22 10:52:13 crc kubenswrapper[4926]: I1122 10:52:13.878044 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-h2qnt"] Nov 22 10:52:13 crc kubenswrapper[4926]: I1122 10:52:13.882517 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-pdkqk"] Nov 22 10:52:13 crc kubenswrapper[4926]: I1122 10:52:13.886447 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-pdkqk"] Nov 22 10:52:14 crc kubenswrapper[4926]: I1122 10:52:14.589535 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="aadd3c70-0dfa-42cb-879d-026a0ed055ba" path="/var/lib/kubelet/pods/aadd3c70-0dfa-42cb-879d-026a0ed055ba/volumes" Nov 22 10:52:14 crc kubenswrapper[4926]: I1122 10:52:14.590176 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ce9a30da-be0f-49f1-8e0b-40bb1fba706a" path="/var/lib/kubelet/pods/ce9a30da-be0f-49f1-8e0b-40bb1fba706a/volumes" Nov 22 10:52:14 crc kubenswrapper[4926]: I1122 10:52:14.841394 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-8464c679-tcvgx" event={"ID":"359cb12f-f999-4689-bd14-6c79b247b38c","Type":"ContainerStarted","Data":"9eac2f3c2f027ba20c369718acb3b2b3c6608425224e7f28c74e9f22f723cd79"} Nov 22 10:52:14 crc kubenswrapper[4926]: I1122 10:52:14.842824 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-8464c679-tcvgx" Nov 22 10:52:14 crc kubenswrapper[4926]: I1122 10:52:14.847098 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-8464c679-tcvgx" Nov 22 10:52:14 crc kubenswrapper[4926]: I1122 10:52:14.859138 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-8464c679-tcvgx" podStartSLOduration=3.859119516 podStartE2EDuration="3.859119516s" podCreationTimestamp="2025-11-22 10:52:11 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 10:52:14.855913215 +0000 UTC m=+755.157518502" watchObservedRunningTime="2025-11-22 10:52:14.859119516 +0000 UTC m=+755.160724813" Nov 22 10:52:15 crc kubenswrapper[4926]: I1122 10:52:15.722243 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-94c98cb89-wvqzc"] Nov 22 10:52:15 crc kubenswrapper[4926]: I1122 10:52:15.723060 4926 
util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-94c98cb89-wvqzc" Nov 22 10:52:15 crc kubenswrapper[4926]: I1122 10:52:15.725509 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config" Nov 22 10:52:15 crc kubenswrapper[4926]: I1122 10:52:15.725650 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt" Nov 22 10:52:15 crc kubenswrapper[4926]: I1122 10:52:15.727508 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert" Nov 22 10:52:15 crc kubenswrapper[4926]: I1122 10:52:15.727609 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt" Nov 22 10:52:15 crc kubenswrapper[4926]: I1122 10:52:15.728243 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c" Nov 22 10:52:15 crc kubenswrapper[4926]: I1122 10:52:15.729804 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca" Nov 22 10:52:15 crc kubenswrapper[4926]: I1122 10:52:15.737657 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca" Nov 22 10:52:15 crc kubenswrapper[4926]: I1122 10:52:15.737840 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-94c98cb89-wvqzc"] Nov 22 10:52:15 crc kubenswrapper[4926]: I1122 10:52:15.767941 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/81f78c18-eeaa-4657-851b-56ed82771979-serving-cert\") pod \"controller-manager-94c98cb89-wvqzc\" (UID: \"81f78c18-eeaa-4657-851b-56ed82771979\") " pod="openshift-controller-manager/controller-manager-94c98cb89-wvqzc" Nov 22 10:52:15 crc kubenswrapper[4926]: I1122 10:52:15.768034 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/81f78c18-eeaa-4657-851b-56ed82771979-config\") pod \"controller-manager-94c98cb89-wvqzc\" (UID: \"81f78c18-eeaa-4657-851b-56ed82771979\") " pod="openshift-controller-manager/controller-manager-94c98cb89-wvqzc" Nov 22 10:52:15 crc kubenswrapper[4926]: I1122 10:52:15.768057 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vspbs\" (UniqueName: \"kubernetes.io/projected/81f78c18-eeaa-4657-851b-56ed82771979-kube-api-access-vspbs\") pod \"controller-manager-94c98cb89-wvqzc\" (UID: \"81f78c18-eeaa-4657-851b-56ed82771979\") " pod="openshift-controller-manager/controller-manager-94c98cb89-wvqzc" Nov 22 10:52:15 crc kubenswrapper[4926]: I1122 10:52:15.768076 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/81f78c18-eeaa-4657-851b-56ed82771979-client-ca\") pod \"controller-manager-94c98cb89-wvqzc\" (UID: \"81f78c18-eeaa-4657-851b-56ed82771979\") " pod="openshift-controller-manager/controller-manager-94c98cb89-wvqzc" Nov 22 10:52:15 crc kubenswrapper[4926]: I1122 10:52:15.768131 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" 
(UniqueName: \"kubernetes.io/configmap/81f78c18-eeaa-4657-851b-56ed82771979-proxy-ca-bundles\") pod \"controller-manager-94c98cb89-wvqzc\" (UID: \"81f78c18-eeaa-4657-851b-56ed82771979\") " pod="openshift-controller-manager/controller-manager-94c98cb89-wvqzc" Nov 22 10:52:15 crc kubenswrapper[4926]: I1122 10:52:15.869081 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/81f78c18-eeaa-4657-851b-56ed82771979-config\") pod \"controller-manager-94c98cb89-wvqzc\" (UID: \"81f78c18-eeaa-4657-851b-56ed82771979\") " pod="openshift-controller-manager/controller-manager-94c98cb89-wvqzc" Nov 22 10:52:15 crc kubenswrapper[4926]: I1122 10:52:15.869168 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vspbs\" (UniqueName: \"kubernetes.io/projected/81f78c18-eeaa-4657-851b-56ed82771979-kube-api-access-vspbs\") pod \"controller-manager-94c98cb89-wvqzc\" (UID: \"81f78c18-eeaa-4657-851b-56ed82771979\") " pod="openshift-controller-manager/controller-manager-94c98cb89-wvqzc" Nov 22 10:52:15 crc kubenswrapper[4926]: I1122 10:52:15.869191 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/81f78c18-eeaa-4657-851b-56ed82771979-client-ca\") pod \"controller-manager-94c98cb89-wvqzc\" (UID: \"81f78c18-eeaa-4657-851b-56ed82771979\") " pod="openshift-controller-manager/controller-manager-94c98cb89-wvqzc" Nov 22 10:52:15 crc kubenswrapper[4926]: I1122 10:52:15.869267 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/81f78c18-eeaa-4657-851b-56ed82771979-proxy-ca-bundles\") pod \"controller-manager-94c98cb89-wvqzc\" (UID: \"81f78c18-eeaa-4657-851b-56ed82771979\") " pod="openshift-controller-manager/controller-manager-94c98cb89-wvqzc" Nov 22 10:52:15 crc kubenswrapper[4926]: I1122 10:52:15.869297 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/81f78c18-eeaa-4657-851b-56ed82771979-serving-cert\") pod \"controller-manager-94c98cb89-wvqzc\" (UID: \"81f78c18-eeaa-4657-851b-56ed82771979\") " pod="openshift-controller-manager/controller-manager-94c98cb89-wvqzc" Nov 22 10:52:15 crc kubenswrapper[4926]: I1122 10:52:15.870282 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/81f78c18-eeaa-4657-851b-56ed82771979-client-ca\") pod \"controller-manager-94c98cb89-wvqzc\" (UID: \"81f78c18-eeaa-4657-851b-56ed82771979\") " pod="openshift-controller-manager/controller-manager-94c98cb89-wvqzc" Nov 22 10:52:15 crc kubenswrapper[4926]: I1122 10:52:15.870981 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/81f78c18-eeaa-4657-851b-56ed82771979-config\") pod \"controller-manager-94c98cb89-wvqzc\" (UID: \"81f78c18-eeaa-4657-851b-56ed82771979\") " pod="openshift-controller-manager/controller-manager-94c98cb89-wvqzc" Nov 22 10:52:15 crc kubenswrapper[4926]: I1122 10:52:15.871642 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/81f78c18-eeaa-4657-851b-56ed82771979-proxy-ca-bundles\") pod \"controller-manager-94c98cb89-wvqzc\" (UID: \"81f78c18-eeaa-4657-851b-56ed82771979\") " 
pod="openshift-controller-manager/controller-manager-94c98cb89-wvqzc" Nov 22 10:52:15 crc kubenswrapper[4926]: I1122 10:52:15.881107 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/81f78c18-eeaa-4657-851b-56ed82771979-serving-cert\") pod \"controller-manager-94c98cb89-wvqzc\" (UID: \"81f78c18-eeaa-4657-851b-56ed82771979\") " pod="openshift-controller-manager/controller-manager-94c98cb89-wvqzc" Nov 22 10:52:15 crc kubenswrapper[4926]: I1122 10:52:15.895731 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vspbs\" (UniqueName: \"kubernetes.io/projected/81f78c18-eeaa-4657-851b-56ed82771979-kube-api-access-vspbs\") pod \"controller-manager-94c98cb89-wvqzc\" (UID: \"81f78c18-eeaa-4657-851b-56ed82771979\") " pod="openshift-controller-manager/controller-manager-94c98cb89-wvqzc" Nov 22 10:52:16 crc kubenswrapper[4926]: I1122 10:52:16.038691 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-94c98cb89-wvqzc" Nov 22 10:52:16 crc kubenswrapper[4926]: I1122 10:52:16.467678 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-94c98cb89-wvqzc"] Nov 22 10:52:16 crc kubenswrapper[4926]: W1122 10:52:16.471822 4926 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod81f78c18_eeaa_4657_851b_56ed82771979.slice/crio-b774a839dc36001d57f045423bdd0b2c45fb6aa9e54d4be0be51e88e0ad20bc8 WatchSource:0}: Error finding container b774a839dc36001d57f045423bdd0b2c45fb6aa9e54d4be0be51e88e0ad20bc8: Status 404 returned error can't find the container with id b774a839dc36001d57f045423bdd0b2c45fb6aa9e54d4be0be51e88e0ad20bc8 Nov 22 10:52:16 crc kubenswrapper[4926]: I1122 10:52:16.860365 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-94c98cb89-wvqzc" event={"ID":"81f78c18-eeaa-4657-851b-56ed82771979","Type":"ContainerStarted","Data":"4d467b0a9fc75d8edd6af45e30f3daad355649ec75f65c315b84fc6ebd995fba"} Nov 22 10:52:16 crc kubenswrapper[4926]: I1122 10:52:16.860714 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-94c98cb89-wvqzc" event={"ID":"81f78c18-eeaa-4657-851b-56ed82771979","Type":"ContainerStarted","Data":"b774a839dc36001d57f045423bdd0b2c45fb6aa9e54d4be0be51e88e0ad20bc8"} Nov 22 10:52:16 crc kubenswrapper[4926]: I1122 10:52:16.893705 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-94c98cb89-wvqzc" podStartSLOduration=5.893687672 podStartE2EDuration="5.893687672s" podCreationTimestamp="2025-11-22 10:52:11 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 10:52:16.893349982 +0000 UTC m=+757.194955289" watchObservedRunningTime="2025-11-22 10:52:16.893687672 +0000 UTC m=+757.195292959" Nov 22 10:52:17 crc kubenswrapper[4926]: I1122 10:52:17.842491 4926 dynamic_cafile_content.go:123] "Loaded a new CA Bundle and Verifier" name="client-ca-bundle::/etc/kubernetes/kubelet-ca.crt" Nov 22 10:52:17 crc kubenswrapper[4926]: I1122 10:52:17.865925 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-94c98cb89-wvqzc" Nov 22 10:52:17 crc kubenswrapper[4926]: I1122 
10:52:17.876278 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-94c98cb89-wvqzc" Nov 22 10:52:32 crc kubenswrapper[4926]: I1122 10:52:32.242857 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/metallb-operator-controller-manager-7d858964b4-hd89m" Nov 22 10:52:33 crc kubenswrapper[4926]: I1122 10:52:33.103240 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/frr-k8s-pb2r5"] Nov 22 10:52:33 crc kubenswrapper[4926]: I1122 10:52:33.107609 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/frr-k8s-pb2r5" Nov 22 10:52:33 crc kubenswrapper[4926]: I1122 10:52:33.109733 4926 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-daemon-dockercfg-d4qd8" Nov 22 10:52:33 crc kubenswrapper[4926]: I1122 10:52:33.111166 4926 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-certs-secret" Nov 22 10:52:33 crc kubenswrapper[4926]: I1122 10:52:33.111322 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"frr-startup" Nov 22 10:52:33 crc kubenswrapper[4926]: I1122 10:52:33.112028 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/frr-k8s-webhook-server-6998585d5-ndwkc"] Nov 22 10:52:33 crc kubenswrapper[4926]: I1122 10:52:33.113150 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/frr-k8s-webhook-server-6998585d5-ndwkc" Nov 22 10:52:33 crc kubenswrapper[4926]: I1122 10:52:33.114812 4926 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-webhook-server-cert" Nov 22 10:52:33 crc kubenswrapper[4926]: I1122 10:52:33.145451 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/frr-k8s-webhook-server-6998585d5-ndwkc"] Nov 22 10:52:33 crc kubenswrapper[4926]: I1122 10:52:33.279721 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/speaker-xg4ns"] Nov 22 10:52:33 crc kubenswrapper[4926]: I1122 10:52:33.280649 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/speaker-xg4ns" Nov 22 10:52:33 crc kubenswrapper[4926]: I1122 10:52:33.290319 4926 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-memberlist" Nov 22 10:52:33 crc kubenswrapper[4926]: I1122 10:52:33.290429 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"metallb-excludel2" Nov 22 10:52:33 crc kubenswrapper[4926]: I1122 10:52:33.290619 4926 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"speaker-dockercfg-29842" Nov 22 10:52:33 crc kubenswrapper[4926]: I1122 10:52:33.290786 4926 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"speaker-certs-secret" Nov 22 10:52:33 crc kubenswrapper[4926]: I1122 10:52:33.294093 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/controller-6c7b4b5f48-pv5tc"] Nov 22 10:52:33 crc kubenswrapper[4926]: I1122 10:52:33.294931 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/controller-6c7b4b5f48-pv5tc" Nov 22 10:52:33 crc kubenswrapper[4926]: I1122 10:52:33.296640 4926 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"controller-certs-secret" Nov 22 10:52:33 crc kubenswrapper[4926]: I1122 10:52:33.302855 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"frr-conf\" (UniqueName: \"kubernetes.io/empty-dir/493e392b-c61f-4115-abdf-42a9c2febe81-frr-conf\") pod \"frr-k8s-pb2r5\" (UID: \"493e392b-c61f-4115-abdf-42a9c2febe81\") " pod="metallb-system/frr-k8s-pb2r5" Nov 22 10:52:33 crc kubenswrapper[4926]: I1122 10:52:33.302918 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics\" (UniqueName: \"kubernetes.io/empty-dir/493e392b-c61f-4115-abdf-42a9c2febe81-metrics\") pod \"frr-k8s-pb2r5\" (UID: \"493e392b-c61f-4115-abdf-42a9c2febe81\") " pod="metallb-system/frr-k8s-pb2r5" Nov 22 10:52:33 crc kubenswrapper[4926]: I1122 10:52:33.302946 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"frr-sockets\" (UniqueName: \"kubernetes.io/empty-dir/493e392b-c61f-4115-abdf-42a9c2febe81-frr-sockets\") pod \"frr-k8s-pb2r5\" (UID: \"493e392b-c61f-4115-abdf-42a9c2febe81\") " pod="metallb-system/frr-k8s-pb2r5" Nov 22 10:52:33 crc kubenswrapper[4926]: I1122 10:52:33.302966 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"reloader\" (UniqueName: \"kubernetes.io/empty-dir/493e392b-c61f-4115-abdf-42a9c2febe81-reloader\") pod \"frr-k8s-pb2r5\" (UID: \"493e392b-c61f-4115-abdf-42a9c2febe81\") " pod="metallb-system/frr-k8s-pb2r5" Nov 22 10:52:33 crc kubenswrapper[4926]: I1122 10:52:33.303032 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fgxbl\" (UniqueName: \"kubernetes.io/projected/493e392b-c61f-4115-abdf-42a9c2febe81-kube-api-access-fgxbl\") pod \"frr-k8s-pb2r5\" (UID: \"493e392b-c61f-4115-abdf-42a9c2febe81\") " pod="metallb-system/frr-k8s-pb2r5" Nov 22 10:52:33 crc kubenswrapper[4926]: I1122 10:52:33.303064 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"frr-startup\" (UniqueName: \"kubernetes.io/configmap/493e392b-c61f-4115-abdf-42a9c2febe81-frr-startup\") pod \"frr-k8s-pb2r5\" (UID: \"493e392b-c61f-4115-abdf-42a9c2febe81\") " pod="metallb-system/frr-k8s-pb2r5" Nov 22 10:52:33 crc kubenswrapper[4926]: I1122 10:52:33.303086 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/e8452d37-6eed-427b-9741-bda6aea54331-cert\") pod \"frr-k8s-webhook-server-6998585d5-ndwkc\" (UID: \"e8452d37-6eed-427b-9741-bda6aea54331\") " pod="metallb-system/frr-k8s-webhook-server-6998585d5-ndwkc" Nov 22 10:52:33 crc kubenswrapper[4926]: I1122 10:52:33.303115 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/493e392b-c61f-4115-abdf-42a9c2febe81-metrics-certs\") pod \"frr-k8s-pb2r5\" (UID: \"493e392b-c61f-4115-abdf-42a9c2febe81\") " pod="metallb-system/frr-k8s-pb2r5" Nov 22 10:52:33 crc kubenswrapper[4926]: I1122 10:52:33.303240 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wmr8t\" (UniqueName: 
\"kubernetes.io/projected/e8452d37-6eed-427b-9741-bda6aea54331-kube-api-access-wmr8t\") pod \"frr-k8s-webhook-server-6998585d5-ndwkc\" (UID: \"e8452d37-6eed-427b-9741-bda6aea54331\") " pod="metallb-system/frr-k8s-webhook-server-6998585d5-ndwkc" Nov 22 10:52:33 crc kubenswrapper[4926]: I1122 10:52:33.308171 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/controller-6c7b4b5f48-pv5tc"] Nov 22 10:52:33 crc kubenswrapper[4926]: I1122 10:52:33.404841 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"frr-conf\" (UniqueName: \"kubernetes.io/empty-dir/493e392b-c61f-4115-abdf-42a9c2febe81-frr-conf\") pod \"frr-k8s-pb2r5\" (UID: \"493e392b-c61f-4115-abdf-42a9c2febe81\") " pod="metallb-system/frr-k8s-pb2r5" Nov 22 10:52:33 crc kubenswrapper[4926]: I1122 10:52:33.404905 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics\" (UniqueName: \"kubernetes.io/empty-dir/493e392b-c61f-4115-abdf-42a9c2febe81-metrics\") pod \"frr-k8s-pb2r5\" (UID: \"493e392b-c61f-4115-abdf-42a9c2febe81\") " pod="metallb-system/frr-k8s-pb2r5" Nov 22 10:52:33 crc kubenswrapper[4926]: I1122 10:52:33.404930 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"frr-sockets\" (UniqueName: \"kubernetes.io/empty-dir/493e392b-c61f-4115-abdf-42a9c2febe81-frr-sockets\") pod \"frr-k8s-pb2r5\" (UID: \"493e392b-c61f-4115-abdf-42a9c2febe81\") " pod="metallb-system/frr-k8s-pb2r5" Nov 22 10:52:33 crc kubenswrapper[4926]: I1122 10:52:33.404959 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/1302de5c-2784-4974-b2ac-3572fc73e1d9-memberlist\") pod \"speaker-xg4ns\" (UID: \"1302de5c-2784-4974-b2ac-3572fc73e1d9\") " pod="metallb-system/speaker-xg4ns" Nov 22 10:52:33 crc kubenswrapper[4926]: I1122 10:52:33.404986 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"reloader\" (UniqueName: \"kubernetes.io/empty-dir/493e392b-c61f-4115-abdf-42a9c2febe81-reloader\") pod \"frr-k8s-pb2r5\" (UID: \"493e392b-c61f-4115-abdf-42a9c2febe81\") " pod="metallb-system/frr-k8s-pb2r5" Nov 22 10:52:33 crc kubenswrapper[4926]: I1122 10:52:33.405024 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/1302de5c-2784-4974-b2ac-3572fc73e1d9-metrics-certs\") pod \"speaker-xg4ns\" (UID: \"1302de5c-2784-4974-b2ac-3572fc73e1d9\") " pod="metallb-system/speaker-xg4ns" Nov 22 10:52:33 crc kubenswrapper[4926]: I1122 10:52:33.405046 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fgxbl\" (UniqueName: \"kubernetes.io/projected/493e392b-c61f-4115-abdf-42a9c2febe81-kube-api-access-fgxbl\") pod \"frr-k8s-pb2r5\" (UID: \"493e392b-c61f-4115-abdf-42a9c2febe81\") " pod="metallb-system/frr-k8s-pb2r5" Nov 22 10:52:33 crc kubenswrapper[4926]: I1122 10:52:33.405065 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/1d4b05be-2133-4a40-a7e8-7e4b49f4c0bc-metrics-certs\") pod \"controller-6c7b4b5f48-pv5tc\" (UID: \"1d4b05be-2133-4a40-a7e8-7e4b49f4c0bc\") " pod="metallb-system/controller-6c7b4b5f48-pv5tc" Nov 22 10:52:33 crc kubenswrapper[4926]: I1122 10:52:33.405205 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"frr-startup\" (UniqueName: 
\"kubernetes.io/configmap/493e392b-c61f-4115-abdf-42a9c2febe81-frr-startup\") pod \"frr-k8s-pb2r5\" (UID: \"493e392b-c61f-4115-abdf-42a9c2febe81\") " pod="metallb-system/frr-k8s-pb2r5" Nov 22 10:52:33 crc kubenswrapper[4926]: I1122 10:52:33.405247 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/e8452d37-6eed-427b-9741-bda6aea54331-cert\") pod \"frr-k8s-webhook-server-6998585d5-ndwkc\" (UID: \"e8452d37-6eed-427b-9741-bda6aea54331\") " pod="metallb-system/frr-k8s-webhook-server-6998585d5-ndwkc" Nov 22 10:52:33 crc kubenswrapper[4926]: I1122 10:52:33.405291 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fqhrk\" (UniqueName: \"kubernetes.io/projected/1d4b05be-2133-4a40-a7e8-7e4b49f4c0bc-kube-api-access-fqhrk\") pod \"controller-6c7b4b5f48-pv5tc\" (UID: \"1d4b05be-2133-4a40-a7e8-7e4b49f4c0bc\") " pod="metallb-system/controller-6c7b4b5f48-pv5tc" Nov 22 10:52:33 crc kubenswrapper[4926]: I1122 10:52:33.405316 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/493e392b-c61f-4115-abdf-42a9c2febe81-metrics-certs\") pod \"frr-k8s-pb2r5\" (UID: \"493e392b-c61f-4115-abdf-42a9c2febe81\") " pod="metallb-system/frr-k8s-pb2r5" Nov 22 10:52:33 crc kubenswrapper[4926]: I1122 10:52:33.405397 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"frr-conf\" (UniqueName: \"kubernetes.io/empty-dir/493e392b-c61f-4115-abdf-42a9c2febe81-frr-conf\") pod \"frr-k8s-pb2r5\" (UID: \"493e392b-c61f-4115-abdf-42a9c2febe81\") " pod="metallb-system/frr-k8s-pb2r5" Nov 22 10:52:33 crc kubenswrapper[4926]: I1122 10:52:33.405406 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/1d4b05be-2133-4a40-a7e8-7e4b49f4c0bc-cert\") pod \"controller-6c7b4b5f48-pv5tc\" (UID: \"1d4b05be-2133-4a40-a7e8-7e4b49f4c0bc\") " pod="metallb-system/controller-6c7b4b5f48-pv5tc" Nov 22 10:52:33 crc kubenswrapper[4926]: I1122 10:52:33.405428 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"frr-sockets\" (UniqueName: \"kubernetes.io/empty-dir/493e392b-c61f-4115-abdf-42a9c2febe81-frr-sockets\") pod \"frr-k8s-pb2r5\" (UID: \"493e392b-c61f-4115-abdf-42a9c2febe81\") " pod="metallb-system/frr-k8s-pb2r5" Nov 22 10:52:33 crc kubenswrapper[4926]: I1122 10:52:33.405458 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wmr8t\" (UniqueName: \"kubernetes.io/projected/e8452d37-6eed-427b-9741-bda6aea54331-kube-api-access-wmr8t\") pod \"frr-k8s-webhook-server-6998585d5-ndwkc\" (UID: \"e8452d37-6eed-427b-9741-bda6aea54331\") " pod="metallb-system/frr-k8s-webhook-server-6998585d5-ndwkc" Nov 22 10:52:33 crc kubenswrapper[4926]: I1122 10:52:33.405491 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metallb-excludel2\" (UniqueName: \"kubernetes.io/configmap/1302de5c-2784-4974-b2ac-3572fc73e1d9-metallb-excludel2\") pod \"speaker-xg4ns\" (UID: \"1302de5c-2784-4974-b2ac-3572fc73e1d9\") " pod="metallb-system/speaker-xg4ns" Nov 22 10:52:33 crc kubenswrapper[4926]: I1122 10:52:33.405505 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics\" (UniqueName: \"kubernetes.io/empty-dir/493e392b-c61f-4115-abdf-42a9c2febe81-metrics\") pod \"frr-k8s-pb2r5\" (UID: 
\"493e392b-c61f-4115-abdf-42a9c2febe81\") " pod="metallb-system/frr-k8s-pb2r5" Nov 22 10:52:33 crc kubenswrapper[4926]: I1122 10:52:33.405516 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gxvjl\" (UniqueName: \"kubernetes.io/projected/1302de5c-2784-4974-b2ac-3572fc73e1d9-kube-api-access-gxvjl\") pod \"speaker-xg4ns\" (UID: \"1302de5c-2784-4974-b2ac-3572fc73e1d9\") " pod="metallb-system/speaker-xg4ns" Nov 22 10:52:33 crc kubenswrapper[4926]: I1122 10:52:33.405599 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"reloader\" (UniqueName: \"kubernetes.io/empty-dir/493e392b-c61f-4115-abdf-42a9c2febe81-reloader\") pod \"frr-k8s-pb2r5\" (UID: \"493e392b-c61f-4115-abdf-42a9c2febe81\") " pod="metallb-system/frr-k8s-pb2r5" Nov 22 10:52:33 crc kubenswrapper[4926]: I1122 10:52:33.406212 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"frr-startup\" (UniqueName: \"kubernetes.io/configmap/493e392b-c61f-4115-abdf-42a9c2febe81-frr-startup\") pod \"frr-k8s-pb2r5\" (UID: \"493e392b-c61f-4115-abdf-42a9c2febe81\") " pod="metallb-system/frr-k8s-pb2r5" Nov 22 10:52:33 crc kubenswrapper[4926]: I1122 10:52:33.412742 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/e8452d37-6eed-427b-9741-bda6aea54331-cert\") pod \"frr-k8s-webhook-server-6998585d5-ndwkc\" (UID: \"e8452d37-6eed-427b-9741-bda6aea54331\") " pod="metallb-system/frr-k8s-webhook-server-6998585d5-ndwkc" Nov 22 10:52:33 crc kubenswrapper[4926]: I1122 10:52:33.424599 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fgxbl\" (UniqueName: \"kubernetes.io/projected/493e392b-c61f-4115-abdf-42a9c2febe81-kube-api-access-fgxbl\") pod \"frr-k8s-pb2r5\" (UID: \"493e392b-c61f-4115-abdf-42a9c2febe81\") " pod="metallb-system/frr-k8s-pb2r5" Nov 22 10:52:33 crc kubenswrapper[4926]: I1122 10:52:33.426172 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/493e392b-c61f-4115-abdf-42a9c2febe81-metrics-certs\") pod \"frr-k8s-pb2r5\" (UID: \"493e392b-c61f-4115-abdf-42a9c2febe81\") " pod="metallb-system/frr-k8s-pb2r5" Nov 22 10:52:33 crc kubenswrapper[4926]: I1122 10:52:33.435211 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/frr-k8s-pb2r5" Nov 22 10:52:33 crc kubenswrapper[4926]: I1122 10:52:33.436168 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wmr8t\" (UniqueName: \"kubernetes.io/projected/e8452d37-6eed-427b-9741-bda6aea54331-kube-api-access-wmr8t\") pod \"frr-k8s-webhook-server-6998585d5-ndwkc\" (UID: \"e8452d37-6eed-427b-9741-bda6aea54331\") " pod="metallb-system/frr-k8s-webhook-server-6998585d5-ndwkc" Nov 22 10:52:33 crc kubenswrapper[4926]: I1122 10:52:33.447300 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/frr-k8s-webhook-server-6998585d5-ndwkc" Nov 22 10:52:33 crc kubenswrapper[4926]: I1122 10:52:33.506229 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/1302de5c-2784-4974-b2ac-3572fc73e1d9-metrics-certs\") pod \"speaker-xg4ns\" (UID: \"1302de5c-2784-4974-b2ac-3572fc73e1d9\") " pod="metallb-system/speaker-xg4ns" Nov 22 10:52:33 crc kubenswrapper[4926]: I1122 10:52:33.506267 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/1d4b05be-2133-4a40-a7e8-7e4b49f4c0bc-metrics-certs\") pod \"controller-6c7b4b5f48-pv5tc\" (UID: \"1d4b05be-2133-4a40-a7e8-7e4b49f4c0bc\") " pod="metallb-system/controller-6c7b4b5f48-pv5tc" Nov 22 10:52:33 crc kubenswrapper[4926]: I1122 10:52:33.506306 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fqhrk\" (UniqueName: \"kubernetes.io/projected/1d4b05be-2133-4a40-a7e8-7e4b49f4c0bc-kube-api-access-fqhrk\") pod \"controller-6c7b4b5f48-pv5tc\" (UID: \"1d4b05be-2133-4a40-a7e8-7e4b49f4c0bc\") " pod="metallb-system/controller-6c7b4b5f48-pv5tc" Nov 22 10:52:33 crc kubenswrapper[4926]: I1122 10:52:33.506348 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/1d4b05be-2133-4a40-a7e8-7e4b49f4c0bc-cert\") pod \"controller-6c7b4b5f48-pv5tc\" (UID: \"1d4b05be-2133-4a40-a7e8-7e4b49f4c0bc\") " pod="metallb-system/controller-6c7b4b5f48-pv5tc" Nov 22 10:52:33 crc kubenswrapper[4926]: I1122 10:52:33.506366 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metallb-excludel2\" (UniqueName: \"kubernetes.io/configmap/1302de5c-2784-4974-b2ac-3572fc73e1d9-metallb-excludel2\") pod \"speaker-xg4ns\" (UID: \"1302de5c-2784-4974-b2ac-3572fc73e1d9\") " pod="metallb-system/speaker-xg4ns" Nov 22 10:52:33 crc kubenswrapper[4926]: I1122 10:52:33.506381 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gxvjl\" (UniqueName: \"kubernetes.io/projected/1302de5c-2784-4974-b2ac-3572fc73e1d9-kube-api-access-gxvjl\") pod \"speaker-xg4ns\" (UID: \"1302de5c-2784-4974-b2ac-3572fc73e1d9\") " pod="metallb-system/speaker-xg4ns" Nov 22 10:52:33 crc kubenswrapper[4926]: I1122 10:52:33.506406 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/1302de5c-2784-4974-b2ac-3572fc73e1d9-memberlist\") pod \"speaker-xg4ns\" (UID: \"1302de5c-2784-4974-b2ac-3572fc73e1d9\") " pod="metallb-system/speaker-xg4ns" Nov 22 10:52:33 crc kubenswrapper[4926]: E1122 10:52:33.506633 4926 secret.go:188] Couldn't get secret metallb-system/metallb-memberlist: secret "metallb-memberlist" not found Nov 22 10:52:33 crc kubenswrapper[4926]: E1122 10:52:33.506683 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/1302de5c-2784-4974-b2ac-3572fc73e1d9-memberlist podName:1302de5c-2784-4974-b2ac-3572fc73e1d9 nodeName:}" failed. No retries permitted until 2025-11-22 10:52:34.006669652 +0000 UTC m=+774.308274939 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "memberlist" (UniqueName: "kubernetes.io/secret/1302de5c-2784-4974-b2ac-3572fc73e1d9-memberlist") pod "speaker-xg4ns" (UID: "1302de5c-2784-4974-b2ac-3572fc73e1d9") : secret "metallb-memberlist" not found Nov 22 10:52:33 crc kubenswrapper[4926]: I1122 10:52:33.507333 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metallb-excludel2\" (UniqueName: \"kubernetes.io/configmap/1302de5c-2784-4974-b2ac-3572fc73e1d9-metallb-excludel2\") pod \"speaker-xg4ns\" (UID: \"1302de5c-2784-4974-b2ac-3572fc73e1d9\") " pod="metallb-system/speaker-xg4ns" Nov 22 10:52:33 crc kubenswrapper[4926]: I1122 10:52:33.511645 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/1d4b05be-2133-4a40-a7e8-7e4b49f4c0bc-cert\") pod \"controller-6c7b4b5f48-pv5tc\" (UID: \"1d4b05be-2133-4a40-a7e8-7e4b49f4c0bc\") " pod="metallb-system/controller-6c7b4b5f48-pv5tc" Nov 22 10:52:33 crc kubenswrapper[4926]: I1122 10:52:33.511657 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/1d4b05be-2133-4a40-a7e8-7e4b49f4c0bc-metrics-certs\") pod \"controller-6c7b4b5f48-pv5tc\" (UID: \"1d4b05be-2133-4a40-a7e8-7e4b49f4c0bc\") " pod="metallb-system/controller-6c7b4b5f48-pv5tc" Nov 22 10:52:33 crc kubenswrapper[4926]: I1122 10:52:33.515245 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/1302de5c-2784-4974-b2ac-3572fc73e1d9-metrics-certs\") pod \"speaker-xg4ns\" (UID: \"1302de5c-2784-4974-b2ac-3572fc73e1d9\") " pod="metallb-system/speaker-xg4ns" Nov 22 10:52:33 crc kubenswrapper[4926]: I1122 10:52:33.526456 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gxvjl\" (UniqueName: \"kubernetes.io/projected/1302de5c-2784-4974-b2ac-3572fc73e1d9-kube-api-access-gxvjl\") pod \"speaker-xg4ns\" (UID: \"1302de5c-2784-4974-b2ac-3572fc73e1d9\") " pod="metallb-system/speaker-xg4ns" Nov 22 10:52:33 crc kubenswrapper[4926]: I1122 10:52:33.526935 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fqhrk\" (UniqueName: \"kubernetes.io/projected/1d4b05be-2133-4a40-a7e8-7e4b49f4c0bc-kube-api-access-fqhrk\") pod \"controller-6c7b4b5f48-pv5tc\" (UID: \"1d4b05be-2133-4a40-a7e8-7e4b49f4c0bc\") " pod="metallb-system/controller-6c7b4b5f48-pv5tc" Nov 22 10:52:33 crc kubenswrapper[4926]: I1122 10:52:33.609190 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/controller-6c7b4b5f48-pv5tc" Nov 22 10:52:33 crc kubenswrapper[4926]: I1122 10:52:33.883103 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/frr-k8s-webhook-server-6998585d5-ndwkc"] Nov 22 10:52:33 crc kubenswrapper[4926]: W1122 10:52:33.894216 4926 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode8452d37_6eed_427b_9741_bda6aea54331.slice/crio-c00a054bfd021f7239553bfec17a33a3a4782bd9372e89f4b856238d1f6bbf8c WatchSource:0}: Error finding container c00a054bfd021f7239553bfec17a33a3a4782bd9372e89f4b856238d1f6bbf8c: Status 404 returned error can't find the container with id c00a054bfd021f7239553bfec17a33a3a4782bd9372e89f4b856238d1f6bbf8c Nov 22 10:52:33 crc kubenswrapper[4926]: I1122 10:52:33.969864 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-webhook-server-6998585d5-ndwkc" event={"ID":"e8452d37-6eed-427b-9741-bda6aea54331","Type":"ContainerStarted","Data":"c00a054bfd021f7239553bfec17a33a3a4782bd9372e89f4b856238d1f6bbf8c"} Nov 22 10:52:33 crc kubenswrapper[4926]: I1122 10:52:33.973275 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-pb2r5" event={"ID":"493e392b-c61f-4115-abdf-42a9c2febe81","Type":"ContainerStarted","Data":"6e4177da6f57b94fd121d0aca2d8bfa94190e317a5a71854d53092ecdf2b6d30"} Nov 22 10:52:34 crc kubenswrapper[4926]: I1122 10:52:34.001318 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/controller-6c7b4b5f48-pv5tc"] Nov 22 10:52:34 crc kubenswrapper[4926]: W1122 10:52:34.005354 4926 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod1d4b05be_2133_4a40_a7e8_7e4b49f4c0bc.slice/crio-986a2d2ef1617727adcccbf2d1e96e77ff8646547d890eaf837f87823e318a64 WatchSource:0}: Error finding container 986a2d2ef1617727adcccbf2d1e96e77ff8646547d890eaf837f87823e318a64: Status 404 returned error can't find the container with id 986a2d2ef1617727adcccbf2d1e96e77ff8646547d890eaf837f87823e318a64 Nov 22 10:52:34 crc kubenswrapper[4926]: I1122 10:52:34.012556 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/1302de5c-2784-4974-b2ac-3572fc73e1d9-memberlist\") pod \"speaker-xg4ns\" (UID: \"1302de5c-2784-4974-b2ac-3572fc73e1d9\") " pod="metallb-system/speaker-xg4ns" Nov 22 10:52:34 crc kubenswrapper[4926]: E1122 10:52:34.012731 4926 secret.go:188] Couldn't get secret metallb-system/metallb-memberlist: secret "metallb-memberlist" not found Nov 22 10:52:34 crc kubenswrapper[4926]: E1122 10:52:34.012793 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/1302de5c-2784-4974-b2ac-3572fc73e1d9-memberlist podName:1302de5c-2784-4974-b2ac-3572fc73e1d9 nodeName:}" failed. No retries permitted until 2025-11-22 10:52:35.012772486 +0000 UTC m=+775.314377783 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "memberlist" (UniqueName: "kubernetes.io/secret/1302de5c-2784-4974-b2ac-3572fc73e1d9-memberlist") pod "speaker-xg4ns" (UID: "1302de5c-2784-4974-b2ac-3572fc73e1d9") : secret "metallb-memberlist" not found Nov 22 10:52:34 crc kubenswrapper[4926]: I1122 10:52:34.981553 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/controller-6c7b4b5f48-pv5tc" event={"ID":"1d4b05be-2133-4a40-a7e8-7e4b49f4c0bc","Type":"ContainerStarted","Data":"ed6a3ac5a0a611ba9d00d548e40f08e0852e6c245728026c6112660a29740ff9"} Nov 22 10:52:34 crc kubenswrapper[4926]: I1122 10:52:34.981834 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/controller-6c7b4b5f48-pv5tc" event={"ID":"1d4b05be-2133-4a40-a7e8-7e4b49f4c0bc","Type":"ContainerStarted","Data":"fb5446f0ab6ccd572d1903947ab45673277dd59730c3ea2a9a55e061d7ef7c98"} Nov 22 10:52:34 crc kubenswrapper[4926]: I1122 10:52:34.981848 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/controller-6c7b4b5f48-pv5tc" event={"ID":"1d4b05be-2133-4a40-a7e8-7e4b49f4c0bc","Type":"ContainerStarted","Data":"986a2d2ef1617727adcccbf2d1e96e77ff8646547d890eaf837f87823e318a64"} Nov 22 10:52:34 crc kubenswrapper[4926]: I1122 10:52:34.981907 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/controller-6c7b4b5f48-pv5tc" Nov 22 10:52:35 crc kubenswrapper[4926]: I1122 10:52:35.000045 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/controller-6c7b4b5f48-pv5tc" podStartSLOduration=2.000023963 podStartE2EDuration="2.000023963s" podCreationTimestamp="2025-11-22 10:52:33 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 10:52:34.998771717 +0000 UTC m=+775.300377024" watchObservedRunningTime="2025-11-22 10:52:35.000023963 +0000 UTC m=+775.301629280" Nov 22 10:52:35 crc kubenswrapper[4926]: I1122 10:52:35.026900 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/1302de5c-2784-4974-b2ac-3572fc73e1d9-memberlist\") pod \"speaker-xg4ns\" (UID: \"1302de5c-2784-4974-b2ac-3572fc73e1d9\") " pod="metallb-system/speaker-xg4ns" Nov 22 10:52:35 crc kubenswrapper[4926]: I1122 10:52:35.037781 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/1302de5c-2784-4974-b2ac-3572fc73e1d9-memberlist\") pod \"speaker-xg4ns\" (UID: \"1302de5c-2784-4974-b2ac-3572fc73e1d9\") " pod="metallb-system/speaker-xg4ns" Nov 22 10:52:35 crc kubenswrapper[4926]: I1122 10:52:35.092547 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/speaker-xg4ns" Nov 22 10:52:35 crc kubenswrapper[4926]: I1122 10:52:35.990936 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-xg4ns" event={"ID":"1302de5c-2784-4974-b2ac-3572fc73e1d9","Type":"ContainerStarted","Data":"392a16d31cc4ef64948bf21f749f0ea87948b4d043a3f5d9470d8fa440ddfbd6"} Nov 22 10:52:35 crc kubenswrapper[4926]: I1122 10:52:35.991007 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-xg4ns" event={"ID":"1302de5c-2784-4974-b2ac-3572fc73e1d9","Type":"ContainerStarted","Data":"5dd1d38f9ed9aa7c2a4f751c0a242e0a1ed87a272b8f6160c9d294bf4bbf0e8f"} Nov 22 10:52:35 crc kubenswrapper[4926]: I1122 10:52:35.991023 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-xg4ns" event={"ID":"1302de5c-2784-4974-b2ac-3572fc73e1d9","Type":"ContainerStarted","Data":"9c1bd7ef73326137814692c9373e9ceb4e23c41ce3410fb32d846afafe55550f"} Nov 22 10:52:35 crc kubenswrapper[4926]: I1122 10:52:35.991298 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/speaker-xg4ns" Nov 22 10:52:36 crc kubenswrapper[4926]: I1122 10:52:36.017051 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/speaker-xg4ns" podStartSLOduration=3.017036188 podStartE2EDuration="3.017036188s" podCreationTimestamp="2025-11-22 10:52:33 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 10:52:36.014230328 +0000 UTC m=+776.315835625" watchObservedRunningTime="2025-11-22 10:52:36.017036188 +0000 UTC m=+776.318641475" Nov 22 10:52:39 crc kubenswrapper[4926]: I1122 10:52:39.661264 4926 patch_prober.go:28] interesting pod/machine-config-daemon-xr9nd container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 22 10:52:39 crc kubenswrapper[4926]: I1122 10:52:39.661769 4926 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" podUID="d4977b14-85c3-4141-9b15-1768f09e8d27" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 22 10:52:39 crc kubenswrapper[4926]: I1122 10:52:39.661810 4926 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" Nov 22 10:52:39 crc kubenswrapper[4926]: I1122 10:52:39.662426 4926 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"0463c6c1ebc5539c197e7353369d97a26101d13494f1fee9c7e8ab944e7952f8"} pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 22 10:52:39 crc kubenswrapper[4926]: I1122 10:52:39.662473 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" podUID="d4977b14-85c3-4141-9b15-1768f09e8d27" containerName="machine-config-daemon" containerID="cri-o://0463c6c1ebc5539c197e7353369d97a26101d13494f1fee9c7e8ab944e7952f8" gracePeriod=600 Nov 22 10:52:40 crc kubenswrapper[4926]: I1122 10:52:40.019635 4926 
generic.go:334] "Generic (PLEG): container finished" podID="d4977b14-85c3-4141-9b15-1768f09e8d27" containerID="0463c6c1ebc5539c197e7353369d97a26101d13494f1fee9c7e8ab944e7952f8" exitCode=0 Nov 22 10:52:40 crc kubenswrapper[4926]: I1122 10:52:40.019674 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" event={"ID":"d4977b14-85c3-4141-9b15-1768f09e8d27","Type":"ContainerDied","Data":"0463c6c1ebc5539c197e7353369d97a26101d13494f1fee9c7e8ab944e7952f8"} Nov 22 10:52:40 crc kubenswrapper[4926]: I1122 10:52:40.019706 4926 scope.go:117] "RemoveContainer" containerID="092030f4c6b7032bf4d8e4be18cfe7552dfcee8631f351b9cef2ec2df961d885" Nov 22 10:52:41 crc kubenswrapper[4926]: I1122 10:52:41.032968 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" event={"ID":"d4977b14-85c3-4141-9b15-1768f09e8d27","Type":"ContainerStarted","Data":"324c8a547404e72a94f3704898c15718c3b4d8e320319c01811edb40ae550f2e"} Nov 22 10:52:41 crc kubenswrapper[4926]: I1122 10:52:41.035950 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-webhook-server-6998585d5-ndwkc" event={"ID":"e8452d37-6eed-427b-9741-bda6aea54331","Type":"ContainerStarted","Data":"3bd4731cb4b3a02e10d508eab6d115931b9f6a40fbf141c907a40f5a56816f70"} Nov 22 10:52:41 crc kubenswrapper[4926]: I1122 10:52:41.036147 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/frr-k8s-webhook-server-6998585d5-ndwkc" Nov 22 10:52:41 crc kubenswrapper[4926]: I1122 10:52:41.038162 4926 generic.go:334] "Generic (PLEG): container finished" podID="493e392b-c61f-4115-abdf-42a9c2febe81" containerID="65c663c36a4cde454973a88f87e3300db1238f5d699e4b4a16769be8674ddb40" exitCode=0 Nov 22 10:52:41 crc kubenswrapper[4926]: I1122 10:52:41.038204 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-pb2r5" event={"ID":"493e392b-c61f-4115-abdf-42a9c2febe81","Type":"ContainerDied","Data":"65c663c36a4cde454973a88f87e3300db1238f5d699e4b4a16769be8674ddb40"} Nov 22 10:52:41 crc kubenswrapper[4926]: I1122 10:52:41.109753 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/frr-k8s-webhook-server-6998585d5-ndwkc" podStartSLOduration=1.970275936 podStartE2EDuration="8.109736291s" podCreationTimestamp="2025-11-22 10:52:33 +0000 UTC" firstStartedPulling="2025-11-22 10:52:33.896148315 +0000 UTC m=+774.197753622" lastFinishedPulling="2025-11-22 10:52:40.03560869 +0000 UTC m=+780.337213977" observedRunningTime="2025-11-22 10:52:41.10934717 +0000 UTC m=+781.410952487" watchObservedRunningTime="2025-11-22 10:52:41.109736291 +0000 UTC m=+781.411341568" Nov 22 10:52:42 crc kubenswrapper[4926]: I1122 10:52:42.047592 4926 generic.go:334] "Generic (PLEG): container finished" podID="493e392b-c61f-4115-abdf-42a9c2febe81" containerID="6ea51b342a7cf3f9535c80be0be36860bbf5d689e2c7983e3a5dd8795cb6ffcd" exitCode=0 Nov 22 10:52:42 crc kubenswrapper[4926]: I1122 10:52:42.047711 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-pb2r5" event={"ID":"493e392b-c61f-4115-abdf-42a9c2febe81","Type":"ContainerDied","Data":"6ea51b342a7cf3f9535c80be0be36860bbf5d689e2c7983e3a5dd8795cb6ffcd"} Nov 22 10:52:44 crc kubenswrapper[4926]: I1122 10:52:44.064807 4926 generic.go:334] "Generic (PLEG): container finished" podID="493e392b-c61f-4115-abdf-42a9c2febe81" 
containerID="0c8f571b327f5cfbc723bbcd741153873fc37e48adba6c1739349f0cbd2c3e81" exitCode=0 Nov 22 10:52:44 crc kubenswrapper[4926]: I1122 10:52:44.064989 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-pb2r5" event={"ID":"493e392b-c61f-4115-abdf-42a9c2febe81","Type":"ContainerDied","Data":"0c8f571b327f5cfbc723bbcd741153873fc37e48adba6c1739349f0cbd2c3e81"} Nov 22 10:52:45 crc kubenswrapper[4926]: I1122 10:52:45.078705 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-pb2r5" event={"ID":"493e392b-c61f-4115-abdf-42a9c2febe81","Type":"ContainerStarted","Data":"d10e85f292bd2b0e178cd09d410da7fdaedde83637c39d6e15baee1afdd98dc9"} Nov 22 10:52:45 crc kubenswrapper[4926]: I1122 10:52:45.079094 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-pb2r5" event={"ID":"493e392b-c61f-4115-abdf-42a9c2febe81","Type":"ContainerStarted","Data":"3abe099481b403b4c58cb901ecd96bd1eabf0d6702b31ab78fd71ba5aa87339c"} Nov 22 10:52:45 crc kubenswrapper[4926]: I1122 10:52:45.079115 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-pb2r5" event={"ID":"493e392b-c61f-4115-abdf-42a9c2febe81","Type":"ContainerStarted","Data":"2a46ecddb37dee70339097aa4273ba34b2b6c2496dca244b8b7e4d71a7b08630"} Nov 22 10:52:45 crc kubenswrapper[4926]: I1122 10:52:45.079127 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-pb2r5" event={"ID":"493e392b-c61f-4115-abdf-42a9c2febe81","Type":"ContainerStarted","Data":"ab5bb37e812b1941a7a08a77854bec0adec51e9683169a6a44a8b6d8cdbd2138"} Nov 22 10:52:45 crc kubenswrapper[4926]: I1122 10:52:45.079142 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-pb2r5" event={"ID":"493e392b-c61f-4115-abdf-42a9c2febe81","Type":"ContainerStarted","Data":"f305648e1293bf56dc5c13356b8ab280c0f4a78254ebfd9732533e01933ee5e5"} Nov 22 10:52:45 crc kubenswrapper[4926]: I1122 10:52:45.096764 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/speaker-xg4ns" Nov 22 10:52:46 crc kubenswrapper[4926]: I1122 10:52:46.094292 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-pb2r5" event={"ID":"493e392b-c61f-4115-abdf-42a9c2febe81","Type":"ContainerStarted","Data":"47e35ad40a7e4f0f4a4dfac37ab1d4eb4ae1bff2b0826d75ba1b723cdd203270"} Nov 22 10:52:46 crc kubenswrapper[4926]: I1122 10:52:46.094674 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/frr-k8s-pb2r5" Nov 22 10:52:46 crc kubenswrapper[4926]: I1122 10:52:46.138863 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/frr-k8s-pb2r5" podStartSLOduration=6.710585159 podStartE2EDuration="13.13884651s" podCreationTimestamp="2025-11-22 10:52:33 +0000 UTC" firstStartedPulling="2025-11-22 10:52:33.630803728 +0000 UTC m=+773.932409015" lastFinishedPulling="2025-11-22 10:52:40.059065079 +0000 UTC m=+780.360670366" observedRunningTime="2025-11-22 10:52:46.137640965 +0000 UTC m=+786.439246262" watchObservedRunningTime="2025-11-22 10:52:46.13884651 +0000 UTC m=+786.440451797" Nov 22 10:52:48 crc kubenswrapper[4926]: I1122 10:52:48.162189 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-index-w8xdn"] Nov 22 10:52:48 crc kubenswrapper[4926]: I1122 10:52:48.164060 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-index-w8xdn" Nov 22 10:52:48 crc kubenswrapper[4926]: I1122 10:52:48.166929 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack-operators"/"openshift-service-ca.crt" Nov 22 10:52:48 crc kubenswrapper[4926]: I1122 10:52:48.168179 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack-operators"/"kube-root-ca.crt" Nov 22 10:52:48 crc kubenswrapper[4926]: I1122 10:52:48.193571 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-w8xdn"] Nov 22 10:52:48 crc kubenswrapper[4926]: I1122 10:52:48.304608 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-88wmd\" (UniqueName: \"kubernetes.io/projected/4c76b6dd-7370-4582-becf-30a462e9f618-kube-api-access-88wmd\") pod \"openstack-operator-index-w8xdn\" (UID: \"4c76b6dd-7370-4582-becf-30a462e9f618\") " pod="openstack-operators/openstack-operator-index-w8xdn" Nov 22 10:52:48 crc kubenswrapper[4926]: I1122 10:52:48.406080 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-88wmd\" (UniqueName: \"kubernetes.io/projected/4c76b6dd-7370-4582-becf-30a462e9f618-kube-api-access-88wmd\") pod \"openstack-operator-index-w8xdn\" (UID: \"4c76b6dd-7370-4582-becf-30a462e9f618\") " pod="openstack-operators/openstack-operator-index-w8xdn" Nov 22 10:52:48 crc kubenswrapper[4926]: I1122 10:52:48.422646 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-88wmd\" (UniqueName: \"kubernetes.io/projected/4c76b6dd-7370-4582-becf-30a462e9f618-kube-api-access-88wmd\") pod \"openstack-operator-index-w8xdn\" (UID: \"4c76b6dd-7370-4582-becf-30a462e9f618\") " pod="openstack-operators/openstack-operator-index-w8xdn" Nov 22 10:52:48 crc kubenswrapper[4926]: I1122 10:52:48.435341 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="metallb-system/frr-k8s-pb2r5" Nov 22 10:52:48 crc kubenswrapper[4926]: I1122 10:52:48.469743 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="metallb-system/frr-k8s-pb2r5" Nov 22 10:52:48 crc kubenswrapper[4926]: I1122 10:52:48.492910 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-index-w8xdn" Nov 22 10:52:48 crc kubenswrapper[4926]: I1122 10:52:48.904261 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-w8xdn"] Nov 22 10:52:49 crc kubenswrapper[4926]: I1122 10:52:49.117748 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-w8xdn" event={"ID":"4c76b6dd-7370-4582-becf-30a462e9f618","Type":"ContainerStarted","Data":"6af43e51b95c5411562128499f84d7b39a2eb4f1205e8f101fa0554823cc7fa3"} Nov 22 10:52:51 crc kubenswrapper[4926]: I1122 10:52:51.132696 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-w8xdn" event={"ID":"4c76b6dd-7370-4582-becf-30a462e9f618","Type":"ContainerStarted","Data":"55189a4f086bb22033aa1cef4fda0d81ad2a8472a3e2073b8144b6b475c76966"} Nov 22 10:52:51 crc kubenswrapper[4926]: I1122 10:52:51.149971 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-index-w8xdn" podStartSLOduration=1.123398546 podStartE2EDuration="3.149944994s" podCreationTimestamp="2025-11-22 10:52:48 +0000 UTC" firstStartedPulling="2025-11-22 10:52:48.916493162 +0000 UTC m=+789.218098449" lastFinishedPulling="2025-11-22 10:52:50.94303961 +0000 UTC m=+791.244644897" observedRunningTime="2025-11-22 10:52:51.14488224 +0000 UTC m=+791.446487537" watchObservedRunningTime="2025-11-22 10:52:51.149944994 +0000 UTC m=+791.451550291" Nov 22 10:52:51 crc kubenswrapper[4926]: I1122 10:52:51.336182 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack-operators/openstack-operator-index-w8xdn"] Nov 22 10:52:51 crc kubenswrapper[4926]: I1122 10:52:51.936059 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-index-56rwr"] Nov 22 10:52:51 crc kubenswrapper[4926]: I1122 10:52:51.936947 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-index-56rwr" Nov 22 10:52:51 crc kubenswrapper[4926]: I1122 10:52:51.940037 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-index-dockercfg-44dww" Nov 22 10:52:51 crc kubenswrapper[4926]: I1122 10:52:51.947031 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-56rwr"] Nov 22 10:52:52 crc kubenswrapper[4926]: I1122 10:52:52.060189 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-p55d4\" (UniqueName: \"kubernetes.io/projected/cc73291c-a3b1-4641-95a2-454130fe25f5-kube-api-access-p55d4\") pod \"openstack-operator-index-56rwr\" (UID: \"cc73291c-a3b1-4641-95a2-454130fe25f5\") " pod="openstack-operators/openstack-operator-index-56rwr" Nov 22 10:52:52 crc kubenswrapper[4926]: I1122 10:52:52.161437 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-p55d4\" (UniqueName: \"kubernetes.io/projected/cc73291c-a3b1-4641-95a2-454130fe25f5-kube-api-access-p55d4\") pod \"openstack-operator-index-56rwr\" (UID: \"cc73291c-a3b1-4641-95a2-454130fe25f5\") " pod="openstack-operators/openstack-operator-index-56rwr" Nov 22 10:52:52 crc kubenswrapper[4926]: I1122 10:52:52.184477 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-p55d4\" (UniqueName: \"kubernetes.io/projected/cc73291c-a3b1-4641-95a2-454130fe25f5-kube-api-access-p55d4\") pod \"openstack-operator-index-56rwr\" (UID: \"cc73291c-a3b1-4641-95a2-454130fe25f5\") " pod="openstack-operators/openstack-operator-index-56rwr" Nov 22 10:52:52 crc kubenswrapper[4926]: I1122 10:52:52.253404 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-index-56rwr" Nov 22 10:52:52 crc kubenswrapper[4926]: I1122 10:52:52.703984 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-56rwr"] Nov 22 10:52:52 crc kubenswrapper[4926]: W1122 10:52:52.711590 4926 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podcc73291c_a3b1_4641_95a2_454130fe25f5.slice/crio-0e3dbc695d90d506648adffd773e18cb638b955fe5e729e8109b058fea781c0e WatchSource:0}: Error finding container 0e3dbc695d90d506648adffd773e18cb638b955fe5e729e8109b058fea781c0e: Status 404 returned error can't find the container with id 0e3dbc695d90d506648adffd773e18cb638b955fe5e729e8109b058fea781c0e Nov 22 10:52:53 crc kubenswrapper[4926]: I1122 10:52:53.160675 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-56rwr" event={"ID":"cc73291c-a3b1-4641-95a2-454130fe25f5","Type":"ContainerStarted","Data":"2a6e319164d4ac6c2585f81caaf6b33e40753e98ec5e7e61abb63b1a92e2d30f"} Nov 22 10:52:53 crc kubenswrapper[4926]: I1122 10:52:53.160738 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-56rwr" event={"ID":"cc73291c-a3b1-4641-95a2-454130fe25f5","Type":"ContainerStarted","Data":"0e3dbc695d90d506648adffd773e18cb638b955fe5e729e8109b058fea781c0e"} Nov 22 10:52:53 crc kubenswrapper[4926]: I1122 10:52:53.162609 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack-operators/openstack-operator-index-w8xdn" podUID="4c76b6dd-7370-4582-becf-30a462e9f618" containerName="registry-server" containerID="cri-o://55189a4f086bb22033aa1cef4fda0d81ad2a8472a3e2073b8144b6b475c76966" gracePeriod=2 Nov 22 10:52:53 crc kubenswrapper[4926]: I1122 10:52:53.193514 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-index-56rwr" podStartSLOduration=2.145002374 podStartE2EDuration="2.193477467s" podCreationTimestamp="2025-11-22 10:52:51 +0000 UTC" firstStartedPulling="2025-11-22 10:52:52.715288003 +0000 UTC m=+793.016893290" lastFinishedPulling="2025-11-22 10:52:52.763763096 +0000 UTC m=+793.065368383" observedRunningTime="2025-11-22 10:52:53.188761533 +0000 UTC m=+793.490366860" watchObservedRunningTime="2025-11-22 10:52:53.193477467 +0000 UTC m=+793.495082794" Nov 22 10:52:53 crc kubenswrapper[4926]: I1122 10:52:53.460823 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/frr-k8s-webhook-server-6998585d5-ndwkc" Nov 22 10:52:53 crc kubenswrapper[4926]: I1122 10:52:53.587881 4926 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-index-w8xdn" Nov 22 10:52:53 crc kubenswrapper[4926]: I1122 10:52:53.612827 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/controller-6c7b4b5f48-pv5tc" Nov 22 10:52:53 crc kubenswrapper[4926]: I1122 10:52:53.682393 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-88wmd\" (UniqueName: \"kubernetes.io/projected/4c76b6dd-7370-4582-becf-30a462e9f618-kube-api-access-88wmd\") pod \"4c76b6dd-7370-4582-becf-30a462e9f618\" (UID: \"4c76b6dd-7370-4582-becf-30a462e9f618\") " Nov 22 10:52:53 crc kubenswrapper[4926]: I1122 10:52:53.686601 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4c76b6dd-7370-4582-becf-30a462e9f618-kube-api-access-88wmd" (OuterVolumeSpecName: "kube-api-access-88wmd") pod "4c76b6dd-7370-4582-becf-30a462e9f618" (UID: "4c76b6dd-7370-4582-becf-30a462e9f618"). InnerVolumeSpecName "kube-api-access-88wmd". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:52:53 crc kubenswrapper[4926]: I1122 10:52:53.784215 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-88wmd\" (UniqueName: \"kubernetes.io/projected/4c76b6dd-7370-4582-becf-30a462e9f618-kube-api-access-88wmd\") on node \"crc\" DevicePath \"\"" Nov 22 10:52:54 crc kubenswrapper[4926]: I1122 10:52:54.170849 4926 generic.go:334] "Generic (PLEG): container finished" podID="4c76b6dd-7370-4582-becf-30a462e9f618" containerID="55189a4f086bb22033aa1cef4fda0d81ad2a8472a3e2073b8144b6b475c76966" exitCode=0 Nov 22 10:52:54 crc kubenswrapper[4926]: I1122 10:52:54.171041 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-w8xdn" event={"ID":"4c76b6dd-7370-4582-becf-30a462e9f618","Type":"ContainerDied","Data":"55189a4f086bb22033aa1cef4fda0d81ad2a8472a3e2073b8144b6b475c76966"} Nov 22 10:52:54 crc kubenswrapper[4926]: I1122 10:52:54.171143 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-w8xdn" event={"ID":"4c76b6dd-7370-4582-becf-30a462e9f618","Type":"ContainerDied","Data":"6af43e51b95c5411562128499f84d7b39a2eb4f1205e8f101fa0554823cc7fa3"} Nov 22 10:52:54 crc kubenswrapper[4926]: I1122 10:52:54.171167 4926 scope.go:117] "RemoveContainer" containerID="55189a4f086bb22033aa1cef4fda0d81ad2a8472a3e2073b8144b6b475c76966" Nov 22 10:52:54 crc kubenswrapper[4926]: I1122 10:52:54.171199 4926 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-index-w8xdn" Nov 22 10:52:54 crc kubenswrapper[4926]: I1122 10:52:54.201371 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack-operators/openstack-operator-index-w8xdn"] Nov 22 10:52:54 crc kubenswrapper[4926]: I1122 10:52:54.205106 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack-operators/openstack-operator-index-w8xdn"] Nov 22 10:52:54 crc kubenswrapper[4926]: I1122 10:52:54.207306 4926 scope.go:117] "RemoveContainer" containerID="55189a4f086bb22033aa1cef4fda0d81ad2a8472a3e2073b8144b6b475c76966" Nov 22 10:52:54 crc kubenswrapper[4926]: E1122 10:52:54.207828 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"55189a4f086bb22033aa1cef4fda0d81ad2a8472a3e2073b8144b6b475c76966\": container with ID starting with 55189a4f086bb22033aa1cef4fda0d81ad2a8472a3e2073b8144b6b475c76966 not found: ID does not exist" containerID="55189a4f086bb22033aa1cef4fda0d81ad2a8472a3e2073b8144b6b475c76966" Nov 22 10:52:54 crc kubenswrapper[4926]: I1122 10:52:54.207874 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"55189a4f086bb22033aa1cef4fda0d81ad2a8472a3e2073b8144b6b475c76966"} err="failed to get container status \"55189a4f086bb22033aa1cef4fda0d81ad2a8472a3e2073b8144b6b475c76966\": rpc error: code = NotFound desc = could not find container \"55189a4f086bb22033aa1cef4fda0d81ad2a8472a3e2073b8144b6b475c76966\": container with ID starting with 55189a4f086bb22033aa1cef4fda0d81ad2a8472a3e2073b8144b6b475c76966 not found: ID does not exist" Nov 22 10:52:54 crc kubenswrapper[4926]: I1122 10:52:54.594455 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4c76b6dd-7370-4582-becf-30a462e9f618" path="/var/lib/kubelet/pods/4c76b6dd-7370-4582-becf-30a462e9f618/volumes" Nov 22 10:53:02 crc kubenswrapper[4926]: I1122 10:53:02.254037 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack-operators/openstack-operator-index-56rwr" Nov 22 10:53:02 crc kubenswrapper[4926]: I1122 10:53:02.254616 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-index-56rwr" Nov 22 10:53:02 crc kubenswrapper[4926]: I1122 10:53:02.285954 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack-operators/openstack-operator-index-56rwr" Nov 22 10:53:03 crc kubenswrapper[4926]: I1122 10:53:03.252366 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-operator-index-56rwr" Nov 22 10:53:03 crc kubenswrapper[4926]: I1122 10:53:03.439697 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/frr-k8s-pb2r5" Nov 22 10:53:09 crc kubenswrapper[4926]: I1122 10:53:09.757893 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/236e4f7cddf341c99d6684fdc49c2b84b27b70fb0d54817b1a8864f51fld44v"] Nov 22 10:53:09 crc kubenswrapper[4926]: E1122 10:53:09.758558 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4c76b6dd-7370-4582-becf-30a462e9f618" containerName="registry-server" Nov 22 10:53:09 crc kubenswrapper[4926]: I1122 10:53:09.758570 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="4c76b6dd-7370-4582-becf-30a462e9f618" containerName="registry-server" Nov 22 10:53:09 crc kubenswrapper[4926]: I1122 10:53:09.758677 4926 
memory_manager.go:354] "RemoveStaleState removing state" podUID="4c76b6dd-7370-4582-becf-30a462e9f618" containerName="registry-server" Nov 22 10:53:09 crc kubenswrapper[4926]: I1122 10:53:09.759489 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/236e4f7cddf341c99d6684fdc49c2b84b27b70fb0d54817b1a8864f51fld44v" Nov 22 10:53:09 crc kubenswrapper[4926]: I1122 10:53:09.761704 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"default-dockercfg-zhvxn" Nov 22 10:53:09 crc kubenswrapper[4926]: I1122 10:53:09.772770 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/236e4f7cddf341c99d6684fdc49c2b84b27b70fb0d54817b1a8864f51fld44v"] Nov 22 10:53:09 crc kubenswrapper[4926]: I1122 10:53:09.925373 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/0379bb7b-1539-4a2f-888d-fc7bd9828a33-util\") pod \"236e4f7cddf341c99d6684fdc49c2b84b27b70fb0d54817b1a8864f51fld44v\" (UID: \"0379bb7b-1539-4a2f-888d-fc7bd9828a33\") " pod="openstack-operators/236e4f7cddf341c99d6684fdc49c2b84b27b70fb0d54817b1a8864f51fld44v" Nov 22 10:53:09 crc kubenswrapper[4926]: I1122 10:53:09.925476 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/0379bb7b-1539-4a2f-888d-fc7bd9828a33-bundle\") pod \"236e4f7cddf341c99d6684fdc49c2b84b27b70fb0d54817b1a8864f51fld44v\" (UID: \"0379bb7b-1539-4a2f-888d-fc7bd9828a33\") " pod="openstack-operators/236e4f7cddf341c99d6684fdc49c2b84b27b70fb0d54817b1a8864f51fld44v" Nov 22 10:53:09 crc kubenswrapper[4926]: I1122 10:53:09.925546 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qbq6h\" (UniqueName: \"kubernetes.io/projected/0379bb7b-1539-4a2f-888d-fc7bd9828a33-kube-api-access-qbq6h\") pod \"236e4f7cddf341c99d6684fdc49c2b84b27b70fb0d54817b1a8864f51fld44v\" (UID: \"0379bb7b-1539-4a2f-888d-fc7bd9828a33\") " pod="openstack-operators/236e4f7cddf341c99d6684fdc49c2b84b27b70fb0d54817b1a8864f51fld44v" Nov 22 10:53:10 crc kubenswrapper[4926]: I1122 10:53:10.026969 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qbq6h\" (UniqueName: \"kubernetes.io/projected/0379bb7b-1539-4a2f-888d-fc7bd9828a33-kube-api-access-qbq6h\") pod \"236e4f7cddf341c99d6684fdc49c2b84b27b70fb0d54817b1a8864f51fld44v\" (UID: \"0379bb7b-1539-4a2f-888d-fc7bd9828a33\") " pod="openstack-operators/236e4f7cddf341c99d6684fdc49c2b84b27b70fb0d54817b1a8864f51fld44v" Nov 22 10:53:10 crc kubenswrapper[4926]: I1122 10:53:10.027072 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/0379bb7b-1539-4a2f-888d-fc7bd9828a33-util\") pod \"236e4f7cddf341c99d6684fdc49c2b84b27b70fb0d54817b1a8864f51fld44v\" (UID: \"0379bb7b-1539-4a2f-888d-fc7bd9828a33\") " pod="openstack-operators/236e4f7cddf341c99d6684fdc49c2b84b27b70fb0d54817b1a8864f51fld44v" Nov 22 10:53:10 crc kubenswrapper[4926]: I1122 10:53:10.027119 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/0379bb7b-1539-4a2f-888d-fc7bd9828a33-bundle\") pod \"236e4f7cddf341c99d6684fdc49c2b84b27b70fb0d54817b1a8864f51fld44v\" (UID: \"0379bb7b-1539-4a2f-888d-fc7bd9828a33\") " 
pod="openstack-operators/236e4f7cddf341c99d6684fdc49c2b84b27b70fb0d54817b1a8864f51fld44v" Nov 22 10:53:10 crc kubenswrapper[4926]: I1122 10:53:10.027615 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/0379bb7b-1539-4a2f-888d-fc7bd9828a33-bundle\") pod \"236e4f7cddf341c99d6684fdc49c2b84b27b70fb0d54817b1a8864f51fld44v\" (UID: \"0379bb7b-1539-4a2f-888d-fc7bd9828a33\") " pod="openstack-operators/236e4f7cddf341c99d6684fdc49c2b84b27b70fb0d54817b1a8864f51fld44v" Nov 22 10:53:10 crc kubenswrapper[4926]: I1122 10:53:10.028248 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/0379bb7b-1539-4a2f-888d-fc7bd9828a33-util\") pod \"236e4f7cddf341c99d6684fdc49c2b84b27b70fb0d54817b1a8864f51fld44v\" (UID: \"0379bb7b-1539-4a2f-888d-fc7bd9828a33\") " pod="openstack-operators/236e4f7cddf341c99d6684fdc49c2b84b27b70fb0d54817b1a8864f51fld44v" Nov 22 10:53:10 crc kubenswrapper[4926]: I1122 10:53:10.047740 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qbq6h\" (UniqueName: \"kubernetes.io/projected/0379bb7b-1539-4a2f-888d-fc7bd9828a33-kube-api-access-qbq6h\") pod \"236e4f7cddf341c99d6684fdc49c2b84b27b70fb0d54817b1a8864f51fld44v\" (UID: \"0379bb7b-1539-4a2f-888d-fc7bd9828a33\") " pod="openstack-operators/236e4f7cddf341c99d6684fdc49c2b84b27b70fb0d54817b1a8864f51fld44v" Nov 22 10:53:10 crc kubenswrapper[4926]: I1122 10:53:10.077412 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/236e4f7cddf341c99d6684fdc49c2b84b27b70fb0d54817b1a8864f51fld44v" Nov 22 10:53:10 crc kubenswrapper[4926]: I1122 10:53:10.471985 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/236e4f7cddf341c99d6684fdc49c2b84b27b70fb0d54817b1a8864f51fld44v"] Nov 22 10:53:10 crc kubenswrapper[4926]: W1122 10:53:10.481978 4926 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod0379bb7b_1539_4a2f_888d_fc7bd9828a33.slice/crio-12da22a963785d1b82137e3d92fc124651bcce63fcdb612f7d6b543f564f2a8a WatchSource:0}: Error finding container 12da22a963785d1b82137e3d92fc124651bcce63fcdb612f7d6b543f564f2a8a: Status 404 returned error can't find the container with id 12da22a963785d1b82137e3d92fc124651bcce63fcdb612f7d6b543f564f2a8a Nov 22 10:53:11 crc kubenswrapper[4926]: I1122 10:53:11.283269 4926 generic.go:334] "Generic (PLEG): container finished" podID="0379bb7b-1539-4a2f-888d-fc7bd9828a33" containerID="ff51287ab94bff83a3da620f5fecaa6173e9b556641d670c8793fae1f961c33f" exitCode=0 Nov 22 10:53:11 crc kubenswrapper[4926]: I1122 10:53:11.283310 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/236e4f7cddf341c99d6684fdc49c2b84b27b70fb0d54817b1a8864f51fld44v" event={"ID":"0379bb7b-1539-4a2f-888d-fc7bd9828a33","Type":"ContainerDied","Data":"ff51287ab94bff83a3da620f5fecaa6173e9b556641d670c8793fae1f961c33f"} Nov 22 10:53:11 crc kubenswrapper[4926]: I1122 10:53:11.283607 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/236e4f7cddf341c99d6684fdc49c2b84b27b70fb0d54817b1a8864f51fld44v" event={"ID":"0379bb7b-1539-4a2f-888d-fc7bd9828a33","Type":"ContainerStarted","Data":"12da22a963785d1b82137e3d92fc124651bcce63fcdb612f7d6b543f564f2a8a"} Nov 22 10:53:12 crc kubenswrapper[4926]: I1122 10:53:12.291324 4926 generic.go:334] "Generic (PLEG): container finished" 
podID="0379bb7b-1539-4a2f-888d-fc7bd9828a33" containerID="1fdc6dacf20d5246c0266890c232d01f548011fc41f619ed2d86f5b0f692c44a" exitCode=0 Nov 22 10:53:12 crc kubenswrapper[4926]: I1122 10:53:12.291407 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/236e4f7cddf341c99d6684fdc49c2b84b27b70fb0d54817b1a8864f51fld44v" event={"ID":"0379bb7b-1539-4a2f-888d-fc7bd9828a33","Type":"ContainerDied","Data":"1fdc6dacf20d5246c0266890c232d01f548011fc41f619ed2d86f5b0f692c44a"} Nov 22 10:53:13 crc kubenswrapper[4926]: I1122 10:53:13.301006 4926 generic.go:334] "Generic (PLEG): container finished" podID="0379bb7b-1539-4a2f-888d-fc7bd9828a33" containerID="99060b22784b76194b0a6f3b72a4b85f8b0a78be3f5afa5c33070ecc3247574a" exitCode=0 Nov 22 10:53:13 crc kubenswrapper[4926]: I1122 10:53:13.301078 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/236e4f7cddf341c99d6684fdc49c2b84b27b70fb0d54817b1a8864f51fld44v" event={"ID":"0379bb7b-1539-4a2f-888d-fc7bd9828a33","Type":"ContainerDied","Data":"99060b22784b76194b0a6f3b72a4b85f8b0a78be3f5afa5c33070ecc3247574a"} Nov 22 10:53:14 crc kubenswrapper[4926]: I1122 10:53:14.579149 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/236e4f7cddf341c99d6684fdc49c2b84b27b70fb0d54817b1a8864f51fld44v" Nov 22 10:53:14 crc kubenswrapper[4926]: I1122 10:53:14.686548 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qbq6h\" (UniqueName: \"kubernetes.io/projected/0379bb7b-1539-4a2f-888d-fc7bd9828a33-kube-api-access-qbq6h\") pod \"0379bb7b-1539-4a2f-888d-fc7bd9828a33\" (UID: \"0379bb7b-1539-4a2f-888d-fc7bd9828a33\") " Nov 22 10:53:14 crc kubenswrapper[4926]: I1122 10:53:14.686730 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/0379bb7b-1539-4a2f-888d-fc7bd9828a33-util\") pod \"0379bb7b-1539-4a2f-888d-fc7bd9828a33\" (UID: \"0379bb7b-1539-4a2f-888d-fc7bd9828a33\") " Nov 22 10:53:14 crc kubenswrapper[4926]: I1122 10:53:14.686843 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/0379bb7b-1539-4a2f-888d-fc7bd9828a33-bundle\") pod \"0379bb7b-1539-4a2f-888d-fc7bd9828a33\" (UID: \"0379bb7b-1539-4a2f-888d-fc7bd9828a33\") " Nov 22 10:53:14 crc kubenswrapper[4926]: I1122 10:53:14.688256 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0379bb7b-1539-4a2f-888d-fc7bd9828a33-bundle" (OuterVolumeSpecName: "bundle") pod "0379bb7b-1539-4a2f-888d-fc7bd9828a33" (UID: "0379bb7b-1539-4a2f-888d-fc7bd9828a33"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 10:53:14 crc kubenswrapper[4926]: I1122 10:53:14.688750 4926 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/0379bb7b-1539-4a2f-888d-fc7bd9828a33-bundle\") on node \"crc\" DevicePath \"\"" Nov 22 10:53:14 crc kubenswrapper[4926]: I1122 10:53:14.693178 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0379bb7b-1539-4a2f-888d-fc7bd9828a33-kube-api-access-qbq6h" (OuterVolumeSpecName: "kube-api-access-qbq6h") pod "0379bb7b-1539-4a2f-888d-fc7bd9828a33" (UID: "0379bb7b-1539-4a2f-888d-fc7bd9828a33"). InnerVolumeSpecName "kube-api-access-qbq6h". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:53:14 crc kubenswrapper[4926]: I1122 10:53:14.711609 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0379bb7b-1539-4a2f-888d-fc7bd9828a33-util" (OuterVolumeSpecName: "util") pod "0379bb7b-1539-4a2f-888d-fc7bd9828a33" (UID: "0379bb7b-1539-4a2f-888d-fc7bd9828a33"). InnerVolumeSpecName "util". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 10:53:14 crc kubenswrapper[4926]: I1122 10:53:14.790185 4926 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/0379bb7b-1539-4a2f-888d-fc7bd9828a33-util\") on node \"crc\" DevicePath \"\"" Nov 22 10:53:14 crc kubenswrapper[4926]: I1122 10:53:14.790986 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qbq6h\" (UniqueName: \"kubernetes.io/projected/0379bb7b-1539-4a2f-888d-fc7bd9828a33-kube-api-access-qbq6h\") on node \"crc\" DevicePath \"\"" Nov 22 10:53:15 crc kubenswrapper[4926]: I1122 10:53:15.317535 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/236e4f7cddf341c99d6684fdc49c2b84b27b70fb0d54817b1a8864f51fld44v" event={"ID":"0379bb7b-1539-4a2f-888d-fc7bd9828a33","Type":"ContainerDied","Data":"12da22a963785d1b82137e3d92fc124651bcce63fcdb612f7d6b543f564f2a8a"} Nov 22 10:53:15 crc kubenswrapper[4926]: I1122 10:53:15.317590 4926 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="12da22a963785d1b82137e3d92fc124651bcce63fcdb612f7d6b543f564f2a8a" Nov 22 10:53:15 crc kubenswrapper[4926]: I1122 10:53:15.317567 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/236e4f7cddf341c99d6684fdc49c2b84b27b70fb0d54817b1a8864f51fld44v" Nov 22 10:53:22 crc kubenswrapper[4926]: I1122 10:53:22.315148 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-controller-operator-76ffdb7f4-g9zx8"] Nov 22 10:53:22 crc kubenswrapper[4926]: E1122 10:53:22.329040 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0379bb7b-1539-4a2f-888d-fc7bd9828a33" containerName="pull" Nov 22 10:53:22 crc kubenswrapper[4926]: I1122 10:53:22.329081 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="0379bb7b-1539-4a2f-888d-fc7bd9828a33" containerName="pull" Nov 22 10:53:22 crc kubenswrapper[4926]: E1122 10:53:22.329101 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0379bb7b-1539-4a2f-888d-fc7bd9828a33" containerName="extract" Nov 22 10:53:22 crc kubenswrapper[4926]: I1122 10:53:22.329108 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="0379bb7b-1539-4a2f-888d-fc7bd9828a33" containerName="extract" Nov 22 10:53:22 crc kubenswrapper[4926]: E1122 10:53:22.329117 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0379bb7b-1539-4a2f-888d-fc7bd9828a33" containerName="util" Nov 22 10:53:22 crc kubenswrapper[4926]: I1122 10:53:22.329123 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="0379bb7b-1539-4a2f-888d-fc7bd9828a33" containerName="util" Nov 22 10:53:22 crc kubenswrapper[4926]: I1122 10:53:22.329282 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="0379bb7b-1539-4a2f-888d-fc7bd9828a33" containerName="extract" Nov 22 10:53:22 crc kubenswrapper[4926]: I1122 10:53:22.335494 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-controller-operator-76ffdb7f4-g9zx8" Nov 22 10:53:22 crc kubenswrapper[4926]: I1122 10:53:22.337994 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-operator-76ffdb7f4-g9zx8"] Nov 22 10:53:22 crc kubenswrapper[4926]: I1122 10:53:22.338337 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-controller-operator-dockercfg-m74g2" Nov 22 10:53:22 crc kubenswrapper[4926]: I1122 10:53:22.505852 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wxgjt\" (UniqueName: \"kubernetes.io/projected/391d1daa-3379-45e6-be55-fb2c3e1d304a-kube-api-access-wxgjt\") pod \"openstack-operator-controller-operator-76ffdb7f4-g9zx8\" (UID: \"391d1daa-3379-45e6-be55-fb2c3e1d304a\") " pod="openstack-operators/openstack-operator-controller-operator-76ffdb7f4-g9zx8" Nov 22 10:53:22 crc kubenswrapper[4926]: I1122 10:53:22.607712 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wxgjt\" (UniqueName: \"kubernetes.io/projected/391d1daa-3379-45e6-be55-fb2c3e1d304a-kube-api-access-wxgjt\") pod \"openstack-operator-controller-operator-76ffdb7f4-g9zx8\" (UID: \"391d1daa-3379-45e6-be55-fb2c3e1d304a\") " pod="openstack-operators/openstack-operator-controller-operator-76ffdb7f4-g9zx8" Nov 22 10:53:22 crc kubenswrapper[4926]: I1122 10:53:22.631180 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wxgjt\" (UniqueName: \"kubernetes.io/projected/391d1daa-3379-45e6-be55-fb2c3e1d304a-kube-api-access-wxgjt\") pod \"openstack-operator-controller-operator-76ffdb7f4-g9zx8\" (UID: \"391d1daa-3379-45e6-be55-fb2c3e1d304a\") " pod="openstack-operators/openstack-operator-controller-operator-76ffdb7f4-g9zx8" Nov 22 10:53:22 crc kubenswrapper[4926]: I1122 10:53:22.662999 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-controller-operator-76ffdb7f4-g9zx8" Nov 22 10:53:23 crc kubenswrapper[4926]: I1122 10:53:23.094607 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-operator-76ffdb7f4-g9zx8"] Nov 22 10:53:23 crc kubenswrapper[4926]: I1122 10:53:23.366723 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-operator-76ffdb7f4-g9zx8" event={"ID":"391d1daa-3379-45e6-be55-fb2c3e1d304a","Type":"ContainerStarted","Data":"b36a93ebe11d42ba4c755167727634c775e235094d1d36be698bb3a1827604f3"} Nov 22 10:53:26 crc kubenswrapper[4926]: I1122 10:53:26.804616 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-6td95"] Nov 22 10:53:26 crc kubenswrapper[4926]: I1122 10:53:26.806015 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-6td95" Nov 22 10:53:26 crc kubenswrapper[4926]: I1122 10:53:26.814311 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-6td95"] Nov 22 10:53:26 crc kubenswrapper[4926]: I1122 10:53:26.870105 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jxxcv\" (UniqueName: \"kubernetes.io/projected/3a20527f-9f25-42ce-b315-1863d80188e6-kube-api-access-jxxcv\") pod \"redhat-operators-6td95\" (UID: \"3a20527f-9f25-42ce-b315-1863d80188e6\") " pod="openshift-marketplace/redhat-operators-6td95" Nov 22 10:53:26 crc kubenswrapper[4926]: I1122 10:53:26.870149 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3a20527f-9f25-42ce-b315-1863d80188e6-utilities\") pod \"redhat-operators-6td95\" (UID: \"3a20527f-9f25-42ce-b315-1863d80188e6\") " pod="openshift-marketplace/redhat-operators-6td95" Nov 22 10:53:26 crc kubenswrapper[4926]: I1122 10:53:26.870190 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3a20527f-9f25-42ce-b315-1863d80188e6-catalog-content\") pod \"redhat-operators-6td95\" (UID: \"3a20527f-9f25-42ce-b315-1863d80188e6\") " pod="openshift-marketplace/redhat-operators-6td95" Nov 22 10:53:26 crc kubenswrapper[4926]: I1122 10:53:26.971486 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3a20527f-9f25-42ce-b315-1863d80188e6-utilities\") pod \"redhat-operators-6td95\" (UID: \"3a20527f-9f25-42ce-b315-1863d80188e6\") " pod="openshift-marketplace/redhat-operators-6td95" Nov 22 10:53:26 crc kubenswrapper[4926]: I1122 10:53:26.971534 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jxxcv\" (UniqueName: \"kubernetes.io/projected/3a20527f-9f25-42ce-b315-1863d80188e6-kube-api-access-jxxcv\") pod \"redhat-operators-6td95\" (UID: \"3a20527f-9f25-42ce-b315-1863d80188e6\") " pod="openshift-marketplace/redhat-operators-6td95" Nov 22 10:53:26 crc kubenswrapper[4926]: I1122 10:53:26.971564 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3a20527f-9f25-42ce-b315-1863d80188e6-catalog-content\") pod \"redhat-operators-6td95\" (UID: \"3a20527f-9f25-42ce-b315-1863d80188e6\") " pod="openshift-marketplace/redhat-operators-6td95" Nov 22 10:53:26 crc kubenswrapper[4926]: I1122 10:53:26.972044 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3a20527f-9f25-42ce-b315-1863d80188e6-catalog-content\") pod \"redhat-operators-6td95\" (UID: \"3a20527f-9f25-42ce-b315-1863d80188e6\") " pod="openshift-marketplace/redhat-operators-6td95" Nov 22 10:53:26 crc kubenswrapper[4926]: I1122 10:53:26.972134 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3a20527f-9f25-42ce-b315-1863d80188e6-utilities\") pod \"redhat-operators-6td95\" (UID: \"3a20527f-9f25-42ce-b315-1863d80188e6\") " pod="openshift-marketplace/redhat-operators-6td95" Nov 22 10:53:26 crc kubenswrapper[4926]: I1122 10:53:26.992052 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-jxxcv\" (UniqueName: \"kubernetes.io/projected/3a20527f-9f25-42ce-b315-1863d80188e6-kube-api-access-jxxcv\") pod \"redhat-operators-6td95\" (UID: \"3a20527f-9f25-42ce-b315-1863d80188e6\") " pod="openshift-marketplace/redhat-operators-6td95" Nov 22 10:53:27 crc kubenswrapper[4926]: I1122 10:53:27.146998 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-6td95" Nov 22 10:53:27 crc kubenswrapper[4926]: I1122 10:53:27.396316 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-operator-76ffdb7f4-g9zx8" event={"ID":"391d1daa-3379-45e6-be55-fb2c3e1d304a","Type":"ContainerStarted","Data":"7182dd3cbefa9a52b006aeaab992b4d91b33f2113922b71464a0a14080907557"} Nov 22 10:53:27 crc kubenswrapper[4926]: I1122 10:53:27.551616 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-6td95"] Nov 22 10:53:27 crc kubenswrapper[4926]: W1122 10:53:27.977919 4926 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3a20527f_9f25_42ce_b315_1863d80188e6.slice/crio-abc409f11922f30ed43ba2d6ebae53c2070e8403e50385e9327d8de46cc4010e WatchSource:0}: Error finding container abc409f11922f30ed43ba2d6ebae53c2070e8403e50385e9327d8de46cc4010e: Status 404 returned error can't find the container with id abc409f11922f30ed43ba2d6ebae53c2070e8403e50385e9327d8de46cc4010e Nov 22 10:53:28 crc kubenswrapper[4926]: I1122 10:53:28.402025 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-6td95" event={"ID":"3a20527f-9f25-42ce-b315-1863d80188e6","Type":"ContainerStarted","Data":"abc409f11922f30ed43ba2d6ebae53c2070e8403e50385e9327d8de46cc4010e"} Nov 22 10:53:29 crc kubenswrapper[4926]: I1122 10:53:29.408094 4926 generic.go:334] "Generic (PLEG): container finished" podID="3a20527f-9f25-42ce-b315-1863d80188e6" containerID="bc526435fb38126c3b75754e197ebe19506eeea19f3df718e64708ded92ad62f" exitCode=0 Nov 22 10:53:29 crc kubenswrapper[4926]: I1122 10:53:29.408178 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-6td95" event={"ID":"3a20527f-9f25-42ce-b315-1863d80188e6","Type":"ContainerDied","Data":"bc526435fb38126c3b75754e197ebe19506eeea19f3df718e64708ded92ad62f"} Nov 22 10:53:29 crc kubenswrapper[4926]: I1122 10:53:29.411445 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-operator-76ffdb7f4-g9zx8" event={"ID":"391d1daa-3379-45e6-be55-fb2c3e1d304a","Type":"ContainerStarted","Data":"70a9d5fa63b7ec6a1195347a78b63b9e1b5050cdbc63f17314465d1d97902ed9"} Nov 22 10:53:29 crc kubenswrapper[4926]: I1122 10:53:29.411641 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-controller-operator-76ffdb7f4-g9zx8" Nov 22 10:53:29 crc kubenswrapper[4926]: I1122 10:53:29.460112 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-controller-operator-76ffdb7f4-g9zx8" podStartSLOduration=1.463530817 podStartE2EDuration="7.460096872s" podCreationTimestamp="2025-11-22 10:53:22 +0000 UTC" firstStartedPulling="2025-11-22 10:53:23.101398083 +0000 UTC m=+823.403003390" lastFinishedPulling="2025-11-22 10:53:29.097964158 +0000 UTC m=+829.399569445" observedRunningTime="2025-11-22 10:53:29.450272862 +0000 UTC m=+829.751878149" 
watchObservedRunningTime="2025-11-22 10:53:29.460096872 +0000 UTC m=+829.761702149" Nov 22 10:53:30 crc kubenswrapper[4926]: I1122 10:53:30.419672 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-6td95" event={"ID":"3a20527f-9f25-42ce-b315-1863d80188e6","Type":"ContainerStarted","Data":"098e21fdd6a8880a1cd1443f5967d124ce55f19bfa35b7aa5a5258d1e180c1ab"} Nov 22 10:53:31 crc kubenswrapper[4926]: I1122 10:53:31.426485 4926 generic.go:334] "Generic (PLEG): container finished" podID="3a20527f-9f25-42ce-b315-1863d80188e6" containerID="098e21fdd6a8880a1cd1443f5967d124ce55f19bfa35b7aa5a5258d1e180c1ab" exitCode=0 Nov 22 10:53:31 crc kubenswrapper[4926]: I1122 10:53:31.426528 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-6td95" event={"ID":"3a20527f-9f25-42ce-b315-1863d80188e6","Type":"ContainerDied","Data":"098e21fdd6a8880a1cd1443f5967d124ce55f19bfa35b7aa5a5258d1e180c1ab"} Nov 22 10:53:32 crc kubenswrapper[4926]: I1122 10:53:32.434755 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-6td95" event={"ID":"3a20527f-9f25-42ce-b315-1863d80188e6","Type":"ContainerStarted","Data":"b635e8ce76cfa5b61cf1315e2303f6d1b55a29028f7dfcf0be44b4a0383a42ea"} Nov 22 10:53:32 crc kubenswrapper[4926]: I1122 10:53:32.665771 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-operator-controller-operator-76ffdb7f4-g9zx8" Nov 22 10:53:32 crc kubenswrapper[4926]: I1122 10:53:32.699493 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-6td95" podStartSLOduration=4.185601274 podStartE2EDuration="6.69947625s" podCreationTimestamp="2025-11-22 10:53:26 +0000 UTC" firstStartedPulling="2025-11-22 10:53:29.409396245 +0000 UTC m=+829.711001532" lastFinishedPulling="2025-11-22 10:53:31.923271201 +0000 UTC m=+832.224876508" observedRunningTime="2025-11-22 10:53:32.452859763 +0000 UTC m=+832.754465120" watchObservedRunningTime="2025-11-22 10:53:32.69947625 +0000 UTC m=+833.001081537" Nov 22 10:53:35 crc kubenswrapper[4926]: I1122 10:53:35.649146 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-k52kt"] Nov 22 10:53:35 crc kubenswrapper[4926]: I1122 10:53:35.651011 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-k52kt" Nov 22 10:53:35 crc kubenswrapper[4926]: I1122 10:53:35.674160 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-j7bsq\" (UniqueName: \"kubernetes.io/projected/5e35ef25-8434-47d1-be37-2238fa98073d-kube-api-access-j7bsq\") pod \"community-operators-k52kt\" (UID: \"5e35ef25-8434-47d1-be37-2238fa98073d\") " pod="openshift-marketplace/community-operators-k52kt" Nov 22 10:53:35 crc kubenswrapper[4926]: I1122 10:53:35.674244 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5e35ef25-8434-47d1-be37-2238fa98073d-catalog-content\") pod \"community-operators-k52kt\" (UID: \"5e35ef25-8434-47d1-be37-2238fa98073d\") " pod="openshift-marketplace/community-operators-k52kt" Nov 22 10:53:35 crc kubenswrapper[4926]: I1122 10:53:35.674330 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5e35ef25-8434-47d1-be37-2238fa98073d-utilities\") pod \"community-operators-k52kt\" (UID: \"5e35ef25-8434-47d1-be37-2238fa98073d\") " pod="openshift-marketplace/community-operators-k52kt" Nov 22 10:53:35 crc kubenswrapper[4926]: I1122 10:53:35.681738 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-k52kt"] Nov 22 10:53:35 crc kubenswrapper[4926]: I1122 10:53:35.775183 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-j7bsq\" (UniqueName: \"kubernetes.io/projected/5e35ef25-8434-47d1-be37-2238fa98073d-kube-api-access-j7bsq\") pod \"community-operators-k52kt\" (UID: \"5e35ef25-8434-47d1-be37-2238fa98073d\") " pod="openshift-marketplace/community-operators-k52kt" Nov 22 10:53:35 crc kubenswrapper[4926]: I1122 10:53:35.775458 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5e35ef25-8434-47d1-be37-2238fa98073d-catalog-content\") pod \"community-operators-k52kt\" (UID: \"5e35ef25-8434-47d1-be37-2238fa98073d\") " pod="openshift-marketplace/community-operators-k52kt" Nov 22 10:53:35 crc kubenswrapper[4926]: I1122 10:53:35.775609 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5e35ef25-8434-47d1-be37-2238fa98073d-utilities\") pod \"community-operators-k52kt\" (UID: \"5e35ef25-8434-47d1-be37-2238fa98073d\") " pod="openshift-marketplace/community-operators-k52kt" Nov 22 10:53:35 crc kubenswrapper[4926]: I1122 10:53:35.776363 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5e35ef25-8434-47d1-be37-2238fa98073d-utilities\") pod \"community-operators-k52kt\" (UID: \"5e35ef25-8434-47d1-be37-2238fa98073d\") " pod="openshift-marketplace/community-operators-k52kt" Nov 22 10:53:35 crc kubenswrapper[4926]: I1122 10:53:35.776365 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5e35ef25-8434-47d1-be37-2238fa98073d-catalog-content\") pod \"community-operators-k52kt\" (UID: \"5e35ef25-8434-47d1-be37-2238fa98073d\") " pod="openshift-marketplace/community-operators-k52kt" Nov 22 10:53:35 crc kubenswrapper[4926]: I1122 10:53:35.793934 4926 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-j7bsq\" (UniqueName: \"kubernetes.io/projected/5e35ef25-8434-47d1-be37-2238fa98073d-kube-api-access-j7bsq\") pod \"community-operators-k52kt\" (UID: \"5e35ef25-8434-47d1-be37-2238fa98073d\") " pod="openshift-marketplace/community-operators-k52kt" Nov 22 10:53:35 crc kubenswrapper[4926]: I1122 10:53:35.990528 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-k52kt" Nov 22 10:53:36 crc kubenswrapper[4926]: I1122 10:53:36.488065 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-k52kt"] Nov 22 10:53:36 crc kubenswrapper[4926]: W1122 10:53:36.492541 4926 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod5e35ef25_8434_47d1_be37_2238fa98073d.slice/crio-cb4b1feb365d3812d0dff5ae121fb9ead9bca3b72b73c9f599582026cbf38f33 WatchSource:0}: Error finding container cb4b1feb365d3812d0dff5ae121fb9ead9bca3b72b73c9f599582026cbf38f33: Status 404 returned error can't find the container with id cb4b1feb365d3812d0dff5ae121fb9ead9bca3b72b73c9f599582026cbf38f33 Nov 22 10:53:37 crc kubenswrapper[4926]: I1122 10:53:37.148147 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-6td95" Nov 22 10:53:37 crc kubenswrapper[4926]: I1122 10:53:37.148496 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-6td95" Nov 22 10:53:37 crc kubenswrapper[4926]: I1122 10:53:37.467035 4926 generic.go:334] "Generic (PLEG): container finished" podID="5e35ef25-8434-47d1-be37-2238fa98073d" containerID="d18eb9b1bbd21e5889b3b5edf997ed73dc3d819b6571f82683e69cfd7864cd06" exitCode=0 Nov 22 10:53:37 crc kubenswrapper[4926]: I1122 10:53:37.467070 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-k52kt" event={"ID":"5e35ef25-8434-47d1-be37-2238fa98073d","Type":"ContainerDied","Data":"d18eb9b1bbd21e5889b3b5edf997ed73dc3d819b6571f82683e69cfd7864cd06"} Nov 22 10:53:37 crc kubenswrapper[4926]: I1122 10:53:37.467111 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-k52kt" event={"ID":"5e35ef25-8434-47d1-be37-2238fa98073d","Type":"ContainerStarted","Data":"cb4b1feb365d3812d0dff5ae121fb9ead9bca3b72b73c9f599582026cbf38f33"} Nov 22 10:53:38 crc kubenswrapper[4926]: I1122 10:53:38.185454 4926 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-6td95" podUID="3a20527f-9f25-42ce-b315-1863d80188e6" containerName="registry-server" probeResult="failure" output=< Nov 22 10:53:38 crc kubenswrapper[4926]: timeout: failed to connect service ":50051" within 1s Nov 22 10:53:38 crc kubenswrapper[4926]: > Nov 22 10:53:38 crc kubenswrapper[4926]: I1122 10:53:38.474436 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-k52kt" event={"ID":"5e35ef25-8434-47d1-be37-2238fa98073d","Type":"ContainerStarted","Data":"61a384448570e7b4cf5d0f7c6af52fa6aa67a211d457ea9b5496c37f6efb5590"} Nov 22 10:53:39 crc kubenswrapper[4926]: I1122 10:53:39.483370 4926 generic.go:334] "Generic (PLEG): container finished" podID="5e35ef25-8434-47d1-be37-2238fa98073d" containerID="61a384448570e7b4cf5d0f7c6af52fa6aa67a211d457ea9b5496c37f6efb5590" exitCode=0 Nov 22 10:53:39 crc kubenswrapper[4926]: I1122 10:53:39.483428 4926 
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-k52kt" event={"ID":"5e35ef25-8434-47d1-be37-2238fa98073d","Type":"ContainerDied","Data":"61a384448570e7b4cf5d0f7c6af52fa6aa67a211d457ea9b5496c37f6efb5590"} Nov 22 10:53:41 crc kubenswrapper[4926]: I1122 10:53:41.377243 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-k52kt" event={"ID":"5e35ef25-8434-47d1-be37-2238fa98073d","Type":"ContainerStarted","Data":"a279ec623d81cffe6065d2bad0b3895c5ddecc5ac976f3f61da0289a239ce07e"} Nov 22 10:53:41 crc kubenswrapper[4926]: I1122 10:53:41.396689 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-k52kt" podStartSLOduration=4.012244539 podStartE2EDuration="6.39667376s" podCreationTimestamp="2025-11-22 10:53:35 +0000 UTC" firstStartedPulling="2025-11-22 10:53:37.468813736 +0000 UTC m=+837.770419023" lastFinishedPulling="2025-11-22 10:53:39.853242957 +0000 UTC m=+840.154848244" observedRunningTime="2025-11-22 10:53:41.394596471 +0000 UTC m=+841.696201778" watchObservedRunningTime="2025-11-22 10:53:41.39667376 +0000 UTC m=+841.698279057" Nov 22 10:53:45 crc kubenswrapper[4926]: I1122 10:53:45.990993 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-k52kt" Nov 22 10:53:45 crc kubenswrapper[4926]: I1122 10:53:45.991800 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-k52kt" Nov 22 10:53:46 crc kubenswrapper[4926]: I1122 10:53:46.062662 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-k52kt" Nov 22 10:53:46 crc kubenswrapper[4926]: I1122 10:53:46.476895 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-k52kt" Nov 22 10:53:46 crc kubenswrapper[4926]: I1122 10:53:46.536791 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-k52kt"] Nov 22 10:53:47 crc kubenswrapper[4926]: I1122 10:53:47.201416 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-6td95" Nov 22 10:53:47 crc kubenswrapper[4926]: I1122 10:53:47.259360 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-6td95" Nov 22 10:53:48 crc kubenswrapper[4926]: I1122 10:53:48.426119 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-k52kt" podUID="5e35ef25-8434-47d1-be37-2238fa98073d" containerName="registry-server" containerID="cri-o://a279ec623d81cffe6065d2bad0b3895c5ddecc5ac976f3f61da0289a239ce07e" gracePeriod=2 Nov 22 10:53:48 crc kubenswrapper[4926]: I1122 10:53:48.704022 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-6td95"] Nov 22 10:53:48 crc kubenswrapper[4926]: I1122 10:53:48.704281 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-6td95" podUID="3a20527f-9f25-42ce-b315-1863d80188e6" containerName="registry-server" containerID="cri-o://b635e8ce76cfa5b61cf1315e2303f6d1b55a29028f7dfcf0be44b4a0383a42ea" gracePeriod=2 Nov 22 10:53:48 crc kubenswrapper[4926]: I1122 10:53:48.855313 4926 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-k52kt" Nov 22 10:53:49 crc kubenswrapper[4926]: I1122 10:53:49.022447 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5e35ef25-8434-47d1-be37-2238fa98073d-utilities\") pod \"5e35ef25-8434-47d1-be37-2238fa98073d\" (UID: \"5e35ef25-8434-47d1-be37-2238fa98073d\") " Nov 22 10:53:49 crc kubenswrapper[4926]: I1122 10:53:49.022532 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5e35ef25-8434-47d1-be37-2238fa98073d-catalog-content\") pod \"5e35ef25-8434-47d1-be37-2238fa98073d\" (UID: \"5e35ef25-8434-47d1-be37-2238fa98073d\") " Nov 22 10:53:49 crc kubenswrapper[4926]: I1122 10:53:49.022612 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-j7bsq\" (UniqueName: \"kubernetes.io/projected/5e35ef25-8434-47d1-be37-2238fa98073d-kube-api-access-j7bsq\") pod \"5e35ef25-8434-47d1-be37-2238fa98073d\" (UID: \"5e35ef25-8434-47d1-be37-2238fa98073d\") " Nov 22 10:53:49 crc kubenswrapper[4926]: I1122 10:53:49.023349 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5e35ef25-8434-47d1-be37-2238fa98073d-utilities" (OuterVolumeSpecName: "utilities") pod "5e35ef25-8434-47d1-be37-2238fa98073d" (UID: "5e35ef25-8434-47d1-be37-2238fa98073d"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 10:53:49 crc kubenswrapper[4926]: I1122 10:53:49.028136 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5e35ef25-8434-47d1-be37-2238fa98073d-kube-api-access-j7bsq" (OuterVolumeSpecName: "kube-api-access-j7bsq") pod "5e35ef25-8434-47d1-be37-2238fa98073d" (UID: "5e35ef25-8434-47d1-be37-2238fa98073d"). InnerVolumeSpecName "kube-api-access-j7bsq". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:53:49 crc kubenswrapper[4926]: I1122 10:53:49.071023 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-6td95" Nov 22 10:53:49 crc kubenswrapper[4926]: I1122 10:53:49.074021 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5e35ef25-8434-47d1-be37-2238fa98073d-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "5e35ef25-8434-47d1-be37-2238fa98073d" (UID: "5e35ef25-8434-47d1-be37-2238fa98073d"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 10:53:49 crc kubenswrapper[4926]: I1122 10:53:49.123380 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3a20527f-9f25-42ce-b315-1863d80188e6-catalog-content\") pod \"3a20527f-9f25-42ce-b315-1863d80188e6\" (UID: \"3a20527f-9f25-42ce-b315-1863d80188e6\") " Nov 22 10:53:49 crc kubenswrapper[4926]: I1122 10:53:49.123432 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3a20527f-9f25-42ce-b315-1863d80188e6-utilities\") pod \"3a20527f-9f25-42ce-b315-1863d80188e6\" (UID: \"3a20527f-9f25-42ce-b315-1863d80188e6\") " Nov 22 10:53:49 crc kubenswrapper[4926]: I1122 10:53:49.123456 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jxxcv\" (UniqueName: \"kubernetes.io/projected/3a20527f-9f25-42ce-b315-1863d80188e6-kube-api-access-jxxcv\") pod \"3a20527f-9f25-42ce-b315-1863d80188e6\" (UID: \"3a20527f-9f25-42ce-b315-1863d80188e6\") " Nov 22 10:53:49 crc kubenswrapper[4926]: I1122 10:53:49.123663 4926 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5e35ef25-8434-47d1-be37-2238fa98073d-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 22 10:53:49 crc kubenswrapper[4926]: I1122 10:53:49.123678 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-j7bsq\" (UniqueName: \"kubernetes.io/projected/5e35ef25-8434-47d1-be37-2238fa98073d-kube-api-access-j7bsq\") on node \"crc\" DevicePath \"\"" Nov 22 10:53:49 crc kubenswrapper[4926]: I1122 10:53:49.123691 4926 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5e35ef25-8434-47d1-be37-2238fa98073d-utilities\") on node \"crc\" DevicePath \"\"" Nov 22 10:53:49 crc kubenswrapper[4926]: I1122 10:53:49.124522 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3a20527f-9f25-42ce-b315-1863d80188e6-utilities" (OuterVolumeSpecName: "utilities") pod "3a20527f-9f25-42ce-b315-1863d80188e6" (UID: "3a20527f-9f25-42ce-b315-1863d80188e6"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 10:53:49 crc kubenswrapper[4926]: I1122 10:53:49.127019 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3a20527f-9f25-42ce-b315-1863d80188e6-kube-api-access-jxxcv" (OuterVolumeSpecName: "kube-api-access-jxxcv") pod "3a20527f-9f25-42ce-b315-1863d80188e6" (UID: "3a20527f-9f25-42ce-b315-1863d80188e6"). InnerVolumeSpecName "kube-api-access-jxxcv". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:53:49 crc kubenswrapper[4926]: I1122 10:53:49.206841 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3a20527f-9f25-42ce-b315-1863d80188e6-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "3a20527f-9f25-42ce-b315-1863d80188e6" (UID: "3a20527f-9f25-42ce-b315-1863d80188e6"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 10:53:49 crc kubenswrapper[4926]: I1122 10:53:49.224559 4926 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3a20527f-9f25-42ce-b315-1863d80188e6-utilities\") on node \"crc\" DevicePath \"\"" Nov 22 10:53:49 crc kubenswrapper[4926]: I1122 10:53:49.224582 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jxxcv\" (UniqueName: \"kubernetes.io/projected/3a20527f-9f25-42ce-b315-1863d80188e6-kube-api-access-jxxcv\") on node \"crc\" DevicePath \"\"" Nov 22 10:53:49 crc kubenswrapper[4926]: I1122 10:53:49.224594 4926 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3a20527f-9f25-42ce-b315-1863d80188e6-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 22 10:53:49 crc kubenswrapper[4926]: I1122 10:53:49.437531 4926 generic.go:334] "Generic (PLEG): container finished" podID="5e35ef25-8434-47d1-be37-2238fa98073d" containerID="a279ec623d81cffe6065d2bad0b3895c5ddecc5ac976f3f61da0289a239ce07e" exitCode=0 Nov 22 10:53:49 crc kubenswrapper[4926]: I1122 10:53:49.437690 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-k52kt" event={"ID":"5e35ef25-8434-47d1-be37-2238fa98073d","Type":"ContainerDied","Data":"a279ec623d81cffe6065d2bad0b3895c5ddecc5ac976f3f61da0289a239ce07e"} Nov 22 10:53:49 crc kubenswrapper[4926]: I1122 10:53:49.437741 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-k52kt" event={"ID":"5e35ef25-8434-47d1-be37-2238fa98073d","Type":"ContainerDied","Data":"cb4b1feb365d3812d0dff5ae121fb9ead9bca3b72b73c9f599582026cbf38f33"} Nov 22 10:53:49 crc kubenswrapper[4926]: I1122 10:53:49.437751 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-k52kt" Nov 22 10:53:49 crc kubenswrapper[4926]: I1122 10:53:49.437781 4926 scope.go:117] "RemoveContainer" containerID="a279ec623d81cffe6065d2bad0b3895c5ddecc5ac976f3f61da0289a239ce07e" Nov 22 10:53:49 crc kubenswrapper[4926]: I1122 10:53:49.441998 4926 generic.go:334] "Generic (PLEG): container finished" podID="3a20527f-9f25-42ce-b315-1863d80188e6" containerID="b635e8ce76cfa5b61cf1315e2303f6d1b55a29028f7dfcf0be44b4a0383a42ea" exitCode=0 Nov 22 10:53:49 crc kubenswrapper[4926]: I1122 10:53:49.442041 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-6td95" event={"ID":"3a20527f-9f25-42ce-b315-1863d80188e6","Type":"ContainerDied","Data":"b635e8ce76cfa5b61cf1315e2303f6d1b55a29028f7dfcf0be44b4a0383a42ea"} Nov 22 10:53:49 crc kubenswrapper[4926]: I1122 10:53:49.442107 4926 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-6td95" Nov 22 10:53:49 crc kubenswrapper[4926]: I1122 10:53:49.442108 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-6td95" event={"ID":"3a20527f-9f25-42ce-b315-1863d80188e6","Type":"ContainerDied","Data":"abc409f11922f30ed43ba2d6ebae53c2070e8403e50385e9327d8de46cc4010e"} Nov 22 10:53:49 crc kubenswrapper[4926]: I1122 10:53:49.468968 4926 scope.go:117] "RemoveContainer" containerID="61a384448570e7b4cf5d0f7c6af52fa6aa67a211d457ea9b5496c37f6efb5590" Nov 22 10:53:49 crc kubenswrapper[4926]: I1122 10:53:49.494855 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-6td95"] Nov 22 10:53:49 crc kubenswrapper[4926]: I1122 10:53:49.506154 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-6td95"] Nov 22 10:53:49 crc kubenswrapper[4926]: I1122 10:53:49.510990 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-k52kt"] Nov 22 10:53:49 crc kubenswrapper[4926]: I1122 10:53:49.516243 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-k52kt"] Nov 22 10:53:49 crc kubenswrapper[4926]: I1122 10:53:49.521031 4926 scope.go:117] "RemoveContainer" containerID="d18eb9b1bbd21e5889b3b5edf997ed73dc3d819b6571f82683e69cfd7864cd06" Nov 22 10:53:49 crc kubenswrapper[4926]: I1122 10:53:49.538347 4926 scope.go:117] "RemoveContainer" containerID="a279ec623d81cffe6065d2bad0b3895c5ddecc5ac976f3f61da0289a239ce07e" Nov 22 10:53:49 crc kubenswrapper[4926]: E1122 10:53:49.539406 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a279ec623d81cffe6065d2bad0b3895c5ddecc5ac976f3f61da0289a239ce07e\": container with ID starting with a279ec623d81cffe6065d2bad0b3895c5ddecc5ac976f3f61da0289a239ce07e not found: ID does not exist" containerID="a279ec623d81cffe6065d2bad0b3895c5ddecc5ac976f3f61da0289a239ce07e" Nov 22 10:53:49 crc kubenswrapper[4926]: I1122 10:53:49.539454 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a279ec623d81cffe6065d2bad0b3895c5ddecc5ac976f3f61da0289a239ce07e"} err="failed to get container status \"a279ec623d81cffe6065d2bad0b3895c5ddecc5ac976f3f61da0289a239ce07e\": rpc error: code = NotFound desc = could not find container \"a279ec623d81cffe6065d2bad0b3895c5ddecc5ac976f3f61da0289a239ce07e\": container with ID starting with a279ec623d81cffe6065d2bad0b3895c5ddecc5ac976f3f61da0289a239ce07e not found: ID does not exist" Nov 22 10:53:49 crc kubenswrapper[4926]: I1122 10:53:49.539485 4926 scope.go:117] "RemoveContainer" containerID="61a384448570e7b4cf5d0f7c6af52fa6aa67a211d457ea9b5496c37f6efb5590" Nov 22 10:53:49 crc kubenswrapper[4926]: E1122 10:53:49.540330 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"61a384448570e7b4cf5d0f7c6af52fa6aa67a211d457ea9b5496c37f6efb5590\": container with ID starting with 61a384448570e7b4cf5d0f7c6af52fa6aa67a211d457ea9b5496c37f6efb5590 not found: ID does not exist" containerID="61a384448570e7b4cf5d0f7c6af52fa6aa67a211d457ea9b5496c37f6efb5590" Nov 22 10:53:49 crc kubenswrapper[4926]: I1122 10:53:49.540405 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"61a384448570e7b4cf5d0f7c6af52fa6aa67a211d457ea9b5496c37f6efb5590"} 
err="failed to get container status \"61a384448570e7b4cf5d0f7c6af52fa6aa67a211d457ea9b5496c37f6efb5590\": rpc error: code = NotFound desc = could not find container \"61a384448570e7b4cf5d0f7c6af52fa6aa67a211d457ea9b5496c37f6efb5590\": container with ID starting with 61a384448570e7b4cf5d0f7c6af52fa6aa67a211d457ea9b5496c37f6efb5590 not found: ID does not exist" Nov 22 10:53:49 crc kubenswrapper[4926]: I1122 10:53:49.540463 4926 scope.go:117] "RemoveContainer" containerID="d18eb9b1bbd21e5889b3b5edf997ed73dc3d819b6571f82683e69cfd7864cd06" Nov 22 10:53:49 crc kubenswrapper[4926]: E1122 10:53:49.541039 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d18eb9b1bbd21e5889b3b5edf997ed73dc3d819b6571f82683e69cfd7864cd06\": container with ID starting with d18eb9b1bbd21e5889b3b5edf997ed73dc3d819b6571f82683e69cfd7864cd06 not found: ID does not exist" containerID="d18eb9b1bbd21e5889b3b5edf997ed73dc3d819b6571f82683e69cfd7864cd06" Nov 22 10:53:49 crc kubenswrapper[4926]: I1122 10:53:49.541069 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d18eb9b1bbd21e5889b3b5edf997ed73dc3d819b6571f82683e69cfd7864cd06"} err="failed to get container status \"d18eb9b1bbd21e5889b3b5edf997ed73dc3d819b6571f82683e69cfd7864cd06\": rpc error: code = NotFound desc = could not find container \"d18eb9b1bbd21e5889b3b5edf997ed73dc3d819b6571f82683e69cfd7864cd06\": container with ID starting with d18eb9b1bbd21e5889b3b5edf997ed73dc3d819b6571f82683e69cfd7864cd06 not found: ID does not exist" Nov 22 10:53:49 crc kubenswrapper[4926]: I1122 10:53:49.541083 4926 scope.go:117] "RemoveContainer" containerID="b635e8ce76cfa5b61cf1315e2303f6d1b55a29028f7dfcf0be44b4a0383a42ea" Nov 22 10:53:49 crc kubenswrapper[4926]: I1122 10:53:49.571278 4926 scope.go:117] "RemoveContainer" containerID="098e21fdd6a8880a1cd1443f5967d124ce55f19bfa35b7aa5a5258d1e180c1ab" Nov 22 10:53:49 crc kubenswrapper[4926]: I1122 10:53:49.585626 4926 scope.go:117] "RemoveContainer" containerID="bc526435fb38126c3b75754e197ebe19506eeea19f3df718e64708ded92ad62f" Nov 22 10:53:49 crc kubenswrapper[4926]: I1122 10:53:49.603400 4926 scope.go:117] "RemoveContainer" containerID="b635e8ce76cfa5b61cf1315e2303f6d1b55a29028f7dfcf0be44b4a0383a42ea" Nov 22 10:53:49 crc kubenswrapper[4926]: E1122 10:53:49.603860 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b635e8ce76cfa5b61cf1315e2303f6d1b55a29028f7dfcf0be44b4a0383a42ea\": container with ID starting with b635e8ce76cfa5b61cf1315e2303f6d1b55a29028f7dfcf0be44b4a0383a42ea not found: ID does not exist" containerID="b635e8ce76cfa5b61cf1315e2303f6d1b55a29028f7dfcf0be44b4a0383a42ea" Nov 22 10:53:49 crc kubenswrapper[4926]: I1122 10:53:49.603951 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b635e8ce76cfa5b61cf1315e2303f6d1b55a29028f7dfcf0be44b4a0383a42ea"} err="failed to get container status \"b635e8ce76cfa5b61cf1315e2303f6d1b55a29028f7dfcf0be44b4a0383a42ea\": rpc error: code = NotFound desc = could not find container \"b635e8ce76cfa5b61cf1315e2303f6d1b55a29028f7dfcf0be44b4a0383a42ea\": container with ID starting with b635e8ce76cfa5b61cf1315e2303f6d1b55a29028f7dfcf0be44b4a0383a42ea not found: ID does not exist" Nov 22 10:53:49 crc kubenswrapper[4926]: I1122 10:53:49.603981 4926 scope.go:117] "RemoveContainer" containerID="098e21fdd6a8880a1cd1443f5967d124ce55f19bfa35b7aa5a5258d1e180c1ab" 
Nov 22 10:53:49 crc kubenswrapper[4926]: E1122 10:53:49.604448 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"098e21fdd6a8880a1cd1443f5967d124ce55f19bfa35b7aa5a5258d1e180c1ab\": container with ID starting with 098e21fdd6a8880a1cd1443f5967d124ce55f19bfa35b7aa5a5258d1e180c1ab not found: ID does not exist" containerID="098e21fdd6a8880a1cd1443f5967d124ce55f19bfa35b7aa5a5258d1e180c1ab" Nov 22 10:53:49 crc kubenswrapper[4926]: I1122 10:53:49.604507 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"098e21fdd6a8880a1cd1443f5967d124ce55f19bfa35b7aa5a5258d1e180c1ab"} err="failed to get container status \"098e21fdd6a8880a1cd1443f5967d124ce55f19bfa35b7aa5a5258d1e180c1ab\": rpc error: code = NotFound desc = could not find container \"098e21fdd6a8880a1cd1443f5967d124ce55f19bfa35b7aa5a5258d1e180c1ab\": container with ID starting with 098e21fdd6a8880a1cd1443f5967d124ce55f19bfa35b7aa5a5258d1e180c1ab not found: ID does not exist" Nov 22 10:53:49 crc kubenswrapper[4926]: I1122 10:53:49.604549 4926 scope.go:117] "RemoveContainer" containerID="bc526435fb38126c3b75754e197ebe19506eeea19f3df718e64708ded92ad62f" Nov 22 10:53:49 crc kubenswrapper[4926]: E1122 10:53:49.604935 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"bc526435fb38126c3b75754e197ebe19506eeea19f3df718e64708ded92ad62f\": container with ID starting with bc526435fb38126c3b75754e197ebe19506eeea19f3df718e64708ded92ad62f not found: ID does not exist" containerID="bc526435fb38126c3b75754e197ebe19506eeea19f3df718e64708ded92ad62f" Nov 22 10:53:49 crc kubenswrapper[4926]: I1122 10:53:49.604979 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bc526435fb38126c3b75754e197ebe19506eeea19f3df718e64708ded92ad62f"} err="failed to get container status \"bc526435fb38126c3b75754e197ebe19506eeea19f3df718e64708ded92ad62f\": rpc error: code = NotFound desc = could not find container \"bc526435fb38126c3b75754e197ebe19506eeea19f3df718e64708ded92ad62f\": container with ID starting with bc526435fb38126c3b75754e197ebe19506eeea19f3df718e64708ded92ad62f not found: ID does not exist" Nov 22 10:53:50 crc kubenswrapper[4926]: I1122 10:53:50.601637 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3a20527f-9f25-42ce-b315-1863d80188e6" path="/var/lib/kubelet/pods/3a20527f-9f25-42ce-b315-1863d80188e6/volumes" Nov 22 10:53:50 crc kubenswrapper[4926]: I1122 10:53:50.603237 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5e35ef25-8434-47d1-be37-2238fa98073d" path="/var/lib/kubelet/pods/5e35ef25-8434-47d1-be37-2238fa98073d/volumes" Nov 22 10:53:53 crc kubenswrapper[4926]: I1122 10:53:53.916836 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-jbrrc"] Nov 22 10:53:53 crc kubenswrapper[4926]: E1122 10:53:53.918299 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3a20527f-9f25-42ce-b315-1863d80188e6" containerName="registry-server" Nov 22 10:53:53 crc kubenswrapper[4926]: I1122 10:53:53.918327 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="3a20527f-9f25-42ce-b315-1863d80188e6" containerName="registry-server" Nov 22 10:53:53 crc kubenswrapper[4926]: E1122 10:53:53.918347 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3a20527f-9f25-42ce-b315-1863d80188e6" 
containerName="extract-content" Nov 22 10:53:53 crc kubenswrapper[4926]: I1122 10:53:53.918360 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="3a20527f-9f25-42ce-b315-1863d80188e6" containerName="extract-content" Nov 22 10:53:53 crc kubenswrapper[4926]: E1122 10:53:53.918375 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3a20527f-9f25-42ce-b315-1863d80188e6" containerName="extract-utilities" Nov 22 10:53:53 crc kubenswrapper[4926]: I1122 10:53:53.918391 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="3a20527f-9f25-42ce-b315-1863d80188e6" containerName="extract-utilities" Nov 22 10:53:53 crc kubenswrapper[4926]: E1122 10:53:53.918415 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5e35ef25-8434-47d1-be37-2238fa98073d" containerName="extract-utilities" Nov 22 10:53:53 crc kubenswrapper[4926]: I1122 10:53:53.918430 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="5e35ef25-8434-47d1-be37-2238fa98073d" containerName="extract-utilities" Nov 22 10:53:53 crc kubenswrapper[4926]: E1122 10:53:53.918463 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5e35ef25-8434-47d1-be37-2238fa98073d" containerName="registry-server" Nov 22 10:53:53 crc kubenswrapper[4926]: I1122 10:53:53.918480 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="5e35ef25-8434-47d1-be37-2238fa98073d" containerName="registry-server" Nov 22 10:53:53 crc kubenswrapper[4926]: E1122 10:53:53.918511 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5e35ef25-8434-47d1-be37-2238fa98073d" containerName="extract-content" Nov 22 10:53:53 crc kubenswrapper[4926]: I1122 10:53:53.918526 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="5e35ef25-8434-47d1-be37-2238fa98073d" containerName="extract-content" Nov 22 10:53:53 crc kubenswrapper[4926]: I1122 10:53:53.918768 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="3a20527f-9f25-42ce-b315-1863d80188e6" containerName="registry-server" Nov 22 10:53:53 crc kubenswrapper[4926]: I1122 10:53:53.918810 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="5e35ef25-8434-47d1-be37-2238fa98073d" containerName="registry-server" Nov 22 10:53:53 crc kubenswrapper[4926]: I1122 10:53:53.920782 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-jbrrc" Nov 22 10:53:53 crc kubenswrapper[4926]: I1122 10:53:53.940491 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-jbrrc"] Nov 22 10:53:54 crc kubenswrapper[4926]: I1122 10:53:54.091869 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6307ac6a-e49c-46f5-858a-b207f0b61910-utilities\") pod \"redhat-marketplace-jbrrc\" (UID: \"6307ac6a-e49c-46f5-858a-b207f0b61910\") " pod="openshift-marketplace/redhat-marketplace-jbrrc" Nov 22 10:53:54 crc kubenswrapper[4926]: I1122 10:53:54.091981 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6307ac6a-e49c-46f5-858a-b207f0b61910-catalog-content\") pod \"redhat-marketplace-jbrrc\" (UID: \"6307ac6a-e49c-46f5-858a-b207f0b61910\") " pod="openshift-marketplace/redhat-marketplace-jbrrc" Nov 22 10:53:54 crc kubenswrapper[4926]: I1122 10:53:54.092013 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ttsjc\" (UniqueName: \"kubernetes.io/projected/6307ac6a-e49c-46f5-858a-b207f0b61910-kube-api-access-ttsjc\") pod \"redhat-marketplace-jbrrc\" (UID: \"6307ac6a-e49c-46f5-858a-b207f0b61910\") " pod="openshift-marketplace/redhat-marketplace-jbrrc" Nov 22 10:53:54 crc kubenswrapper[4926]: I1122 10:53:54.193415 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6307ac6a-e49c-46f5-858a-b207f0b61910-catalog-content\") pod \"redhat-marketplace-jbrrc\" (UID: \"6307ac6a-e49c-46f5-858a-b207f0b61910\") " pod="openshift-marketplace/redhat-marketplace-jbrrc" Nov 22 10:53:54 crc kubenswrapper[4926]: I1122 10:53:54.193479 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ttsjc\" (UniqueName: \"kubernetes.io/projected/6307ac6a-e49c-46f5-858a-b207f0b61910-kube-api-access-ttsjc\") pod \"redhat-marketplace-jbrrc\" (UID: \"6307ac6a-e49c-46f5-858a-b207f0b61910\") " pod="openshift-marketplace/redhat-marketplace-jbrrc" Nov 22 10:53:54 crc kubenswrapper[4926]: I1122 10:53:54.193523 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6307ac6a-e49c-46f5-858a-b207f0b61910-utilities\") pod \"redhat-marketplace-jbrrc\" (UID: \"6307ac6a-e49c-46f5-858a-b207f0b61910\") " pod="openshift-marketplace/redhat-marketplace-jbrrc" Nov 22 10:53:54 crc kubenswrapper[4926]: I1122 10:53:54.194154 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6307ac6a-e49c-46f5-858a-b207f0b61910-utilities\") pod \"redhat-marketplace-jbrrc\" (UID: \"6307ac6a-e49c-46f5-858a-b207f0b61910\") " pod="openshift-marketplace/redhat-marketplace-jbrrc" Nov 22 10:53:54 crc kubenswrapper[4926]: I1122 10:53:54.194197 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6307ac6a-e49c-46f5-858a-b207f0b61910-catalog-content\") pod \"redhat-marketplace-jbrrc\" (UID: \"6307ac6a-e49c-46f5-858a-b207f0b61910\") " pod="openshift-marketplace/redhat-marketplace-jbrrc" Nov 22 10:53:54 crc kubenswrapper[4926]: I1122 10:53:54.221431 4926 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-ttsjc\" (UniqueName: \"kubernetes.io/projected/6307ac6a-e49c-46f5-858a-b207f0b61910-kube-api-access-ttsjc\") pod \"redhat-marketplace-jbrrc\" (UID: \"6307ac6a-e49c-46f5-858a-b207f0b61910\") " pod="openshift-marketplace/redhat-marketplace-jbrrc" Nov 22 10:53:54 crc kubenswrapper[4926]: I1122 10:53:54.254170 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-jbrrc" Nov 22 10:53:54 crc kubenswrapper[4926]: I1122 10:53:54.628903 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-jbrrc"] Nov 22 10:53:55 crc kubenswrapper[4926]: I1122 10:53:55.486421 4926 generic.go:334] "Generic (PLEG): container finished" podID="6307ac6a-e49c-46f5-858a-b207f0b61910" containerID="74523a1e8c3a917685633fe60eea4a4c08741b68775123f7cce11bd8cc4d6eea" exitCode=0 Nov 22 10:53:55 crc kubenswrapper[4926]: I1122 10:53:55.486460 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-jbrrc" event={"ID":"6307ac6a-e49c-46f5-858a-b207f0b61910","Type":"ContainerDied","Data":"74523a1e8c3a917685633fe60eea4a4c08741b68775123f7cce11bd8cc4d6eea"} Nov 22 10:53:55 crc kubenswrapper[4926]: I1122 10:53:55.486484 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-jbrrc" event={"ID":"6307ac6a-e49c-46f5-858a-b207f0b61910","Type":"ContainerStarted","Data":"a543444c1f06fd651e081443a222673f941aa8ea7d238b7d685d19a9aa1668af"} Nov 22 10:53:56 crc kubenswrapper[4926]: I1122 10:53:56.493857 4926 generic.go:334] "Generic (PLEG): container finished" podID="6307ac6a-e49c-46f5-858a-b207f0b61910" containerID="6b28b3335dbe8dcfe436cf102a88bdd887bc9ca114f850bee94aa1954b61fa7d" exitCode=0 Nov 22 10:53:56 crc kubenswrapper[4926]: I1122 10:53:56.493919 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-jbrrc" event={"ID":"6307ac6a-e49c-46f5-858a-b207f0b61910","Type":"ContainerDied","Data":"6b28b3335dbe8dcfe436cf102a88bdd887bc9ca114f850bee94aa1954b61fa7d"} Nov 22 10:53:57 crc kubenswrapper[4926]: I1122 10:53:57.502532 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-jbrrc" event={"ID":"6307ac6a-e49c-46f5-858a-b207f0b61910","Type":"ContainerStarted","Data":"675ff17a73e3eb56f159869d655f1042acf8ef82a410907e6ae813c80c454843"} Nov 22 10:53:57 crc kubenswrapper[4926]: I1122 10:53:57.522283 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-jbrrc" podStartSLOduration=3.037757853 podStartE2EDuration="4.522258504s" podCreationTimestamp="2025-11-22 10:53:53 +0000 UTC" firstStartedPulling="2025-11-22 10:53:55.487976215 +0000 UTC m=+855.789581502" lastFinishedPulling="2025-11-22 10:53:56.972476866 +0000 UTC m=+857.274082153" observedRunningTime="2025-11-22 10:53:57.520966857 +0000 UTC m=+857.822572164" watchObservedRunningTime="2025-11-22 10:53:57.522258504 +0000 UTC m=+857.823863811" Nov 22 10:54:04 crc kubenswrapper[4926]: I1122 10:54:04.254280 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-jbrrc" Nov 22 10:54:04 crc kubenswrapper[4926]: I1122 10:54:04.254958 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-jbrrc" Nov 22 10:54:04 crc kubenswrapper[4926]: I1122 10:54:04.314168 4926 kubelet.go:2542] "SyncLoop 
(probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-jbrrc" Nov 22 10:54:04 crc kubenswrapper[4926]: I1122 10:54:04.611630 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-jbrrc" Nov 22 10:54:04 crc kubenswrapper[4926]: I1122 10:54:04.676621 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-jbrrc"] Nov 22 10:54:05 crc kubenswrapper[4926]: I1122 10:54:05.704681 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/barbican-operator-controller-manager-697c78f669-dfq9w"] Nov 22 10:54:05 crc kubenswrapper[4926]: I1122 10:54:05.705695 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/barbican-operator-controller-manager-697c78f669-dfq9w" Nov 22 10:54:05 crc kubenswrapper[4926]: I1122 10:54:05.721366 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/barbican-operator-controller-manager-697c78f669-dfq9w"] Nov 22 10:54:05 crc kubenswrapper[4926]: I1122 10:54:05.721794 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"barbican-operator-controller-manager-dockercfg-k8zmk" Nov 22 10:54:05 crc kubenswrapper[4926]: I1122 10:54:05.757193 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/cinder-operator-controller-manager-6498cbf48f-cttxc"] Nov 22 10:54:05 crc kubenswrapper[4926]: I1122 10:54:05.758778 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/cinder-operator-controller-manager-6498cbf48f-cttxc" Nov 22 10:54:05 crc kubenswrapper[4926]: I1122 10:54:05.761460 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"cinder-operator-controller-manager-dockercfg-z66f5" Nov 22 10:54:05 crc kubenswrapper[4926]: I1122 10:54:05.768389 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/designate-operator-controller-manager-767ccfd65f-8p4kn"] Nov 22 10:54:05 crc kubenswrapper[4926]: I1122 10:54:05.769705 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/designate-operator-controller-manager-767ccfd65f-8p4kn" Nov 22 10:54:05 crc kubenswrapper[4926]: I1122 10:54:05.776817 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"designate-operator-controller-manager-dockercfg-s9cl9" Nov 22 10:54:05 crc kubenswrapper[4926]: I1122 10:54:05.781974 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/cinder-operator-controller-manager-6498cbf48f-cttxc"] Nov 22 10:54:05 crc kubenswrapper[4926]: I1122 10:54:05.787967 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/designate-operator-controller-manager-767ccfd65f-8p4kn"] Nov 22 10:54:05 crc kubenswrapper[4926]: I1122 10:54:05.791484 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/glance-operator-controller-manager-7969689c84-qshq6"] Nov 22 10:54:05 crc kubenswrapper[4926]: I1122 10:54:05.792708 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/glance-operator-controller-manager-7969689c84-qshq6" Nov 22 10:54:05 crc kubenswrapper[4926]: I1122 10:54:05.803580 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"glance-operator-controller-manager-dockercfg-6qfgx" Nov 22 10:54:05 crc kubenswrapper[4926]: I1122 10:54:05.809006 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/glance-operator-controller-manager-7969689c84-qshq6"] Nov 22 10:54:05 crc kubenswrapper[4926]: I1122 10:54:05.830318 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/heat-operator-controller-manager-7869d7c46b-np8cn"] Nov 22 10:54:05 crc kubenswrapper[4926]: I1122 10:54:05.831487 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/heat-operator-controller-manager-7869d7c46b-np8cn" Nov 22 10:54:05 crc kubenswrapper[4926]: I1122 10:54:05.838353 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"heat-operator-controller-manager-dockercfg-2mj6s" Nov 22 10:54:05 crc kubenswrapper[4926]: I1122 10:54:05.850997 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/horizon-operator-controller-manager-598f69df5d-hls4w"] Nov 22 10:54:05 crc kubenswrapper[4926]: I1122 10:54:05.852296 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/horizon-operator-controller-manager-598f69df5d-hls4w" Nov 22 10:54:05 crc kubenswrapper[4926]: I1122 10:54:05.857165 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"horizon-operator-controller-manager-dockercfg-55gtg" Nov 22 10:54:05 crc kubenswrapper[4926]: I1122 10:54:05.859830 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/infra-operator-controller-manager-7875d8bb94-pr7tn"] Nov 22 10:54:05 crc kubenswrapper[4926]: I1122 10:54:05.860999 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/infra-operator-controller-manager-7875d8bb94-pr7tn" Nov 22 10:54:05 crc kubenswrapper[4926]: I1122 10:54:05.864393 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/horizon-operator-controller-manager-598f69df5d-hls4w"] Nov 22 10:54:05 crc kubenswrapper[4926]: I1122 10:54:05.867551 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"infra-operator-controller-manager-dockercfg-msdtb" Nov 22 10:54:05 crc kubenswrapper[4926]: I1122 10:54:05.867974 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"infra-operator-webhook-server-cert" Nov 22 10:54:05 crc kubenswrapper[4926]: I1122 10:54:05.868571 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/dc80ed79-7a34-4756-b5ed-0b3cda532910-cert\") pod \"infra-operator-controller-manager-7875d8bb94-pr7tn\" (UID: \"dc80ed79-7a34-4756-b5ed-0b3cda532910\") " pod="openstack-operators/infra-operator-controller-manager-7875d8bb94-pr7tn" Nov 22 10:54:05 crc kubenswrapper[4926]: I1122 10:54:05.868603 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-26jb8\" (UniqueName: \"kubernetes.io/projected/644aaf3f-48c2-4789-9775-18ed3ae24fd7-kube-api-access-26jb8\") pod \"cinder-operator-controller-manager-6498cbf48f-cttxc\" (UID: \"644aaf3f-48c2-4789-9775-18ed3ae24fd7\") " pod="openstack-operators/cinder-operator-controller-manager-6498cbf48f-cttxc" Nov 22 10:54:05 crc kubenswrapper[4926]: I1122 10:54:05.868631 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w74tl\" (UniqueName: \"kubernetes.io/projected/f3502c04-7310-4659-aa47-b91b71ff3b30-kube-api-access-w74tl\") pod \"horizon-operator-controller-manager-598f69df5d-hls4w\" (UID: \"f3502c04-7310-4659-aa47-b91b71ff3b30\") " pod="openstack-operators/horizon-operator-controller-manager-598f69df5d-hls4w" Nov 22 10:54:05 crc kubenswrapper[4926]: I1122 10:54:05.868662 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dzbn4\" (UniqueName: \"kubernetes.io/projected/dc80ed79-7a34-4756-b5ed-0b3cda532910-kube-api-access-dzbn4\") pod \"infra-operator-controller-manager-7875d8bb94-pr7tn\" (UID: \"dc80ed79-7a34-4756-b5ed-0b3cda532910\") " pod="openstack-operators/infra-operator-controller-manager-7875d8bb94-pr7tn" Nov 22 10:54:05 crc kubenswrapper[4926]: I1122 10:54:05.868681 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g4jr6\" (UniqueName: \"kubernetes.io/projected/3ebbbdf8-da82-4f02-a8f5-509de3b56721-kube-api-access-g4jr6\") pod \"glance-operator-controller-manager-7969689c84-qshq6\" (UID: \"3ebbbdf8-da82-4f02-a8f5-509de3b56721\") " pod="openstack-operators/glance-operator-controller-manager-7969689c84-qshq6" Nov 22 10:54:05 crc kubenswrapper[4926]: I1122 10:54:05.868699 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7s26j\" (UniqueName: \"kubernetes.io/projected/f8406cda-67f4-425a-83f1-ab90cf4ebf0c-kube-api-access-7s26j\") pod \"barbican-operator-controller-manager-697c78f669-dfq9w\" (UID: \"f8406cda-67f4-425a-83f1-ab90cf4ebf0c\") " pod="openstack-operators/barbican-operator-controller-manager-697c78f669-dfq9w" Nov 22 10:54:05 crc 
kubenswrapper[4926]: I1122 10:54:05.868725 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lcctk\" (UniqueName: \"kubernetes.io/projected/02d4d3c4-4951-4f41-8605-239ac95dae92-kube-api-access-lcctk\") pod \"designate-operator-controller-manager-767ccfd65f-8p4kn\" (UID: \"02d4d3c4-4951-4f41-8605-239ac95dae92\") " pod="openstack-operators/designate-operator-controller-manager-767ccfd65f-8p4kn" Nov 22 10:54:05 crc kubenswrapper[4926]: I1122 10:54:05.868762 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-r79jw\" (UniqueName: \"kubernetes.io/projected/bf6721b8-a1f6-4d27-ad5a-c090e2dc8806-kube-api-access-r79jw\") pod \"heat-operator-controller-manager-7869d7c46b-np8cn\" (UID: \"bf6721b8-a1f6-4d27-ad5a-c090e2dc8806\") " pod="openstack-operators/heat-operator-controller-manager-7869d7c46b-np8cn" Nov 22 10:54:05 crc kubenswrapper[4926]: I1122 10:54:05.879948 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/heat-operator-controller-manager-7869d7c46b-np8cn"] Nov 22 10:54:05 crc kubenswrapper[4926]: I1122 10:54:05.899076 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/ironic-operator-controller-manager-99b499f4-7j69z"] Nov 22 10:54:05 crc kubenswrapper[4926]: I1122 10:54:05.900288 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/ironic-operator-controller-manager-99b499f4-7j69z" Nov 22 10:54:05 crc kubenswrapper[4926]: I1122 10:54:05.904663 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"ironic-operator-controller-manager-dockercfg-bmxz9" Nov 22 10:54:05 crc kubenswrapper[4926]: I1122 10:54:05.935114 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/infra-operator-controller-manager-7875d8bb94-pr7tn"] Nov 22 10:54:05 crc kubenswrapper[4926]: I1122 10:54:05.955948 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ironic-operator-controller-manager-99b499f4-7j69z"] Nov 22 10:54:05 crc kubenswrapper[4926]: I1122 10:54:05.970614 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-26jb8\" (UniqueName: \"kubernetes.io/projected/644aaf3f-48c2-4789-9775-18ed3ae24fd7-kube-api-access-26jb8\") pod \"cinder-operator-controller-manager-6498cbf48f-cttxc\" (UID: \"644aaf3f-48c2-4789-9775-18ed3ae24fd7\") " pod="openstack-operators/cinder-operator-controller-manager-6498cbf48f-cttxc" Nov 22 10:54:05 crc kubenswrapper[4926]: I1122 10:54:05.970672 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w74tl\" (UniqueName: \"kubernetes.io/projected/f3502c04-7310-4659-aa47-b91b71ff3b30-kube-api-access-w74tl\") pod \"horizon-operator-controller-manager-598f69df5d-hls4w\" (UID: \"f3502c04-7310-4659-aa47-b91b71ff3b30\") " pod="openstack-operators/horizon-operator-controller-manager-598f69df5d-hls4w" Nov 22 10:54:05 crc kubenswrapper[4926]: I1122 10:54:05.970726 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-k8j8p\" (UniqueName: \"kubernetes.io/projected/46528db3-6717-4abb-a779-33290ae0c986-kube-api-access-k8j8p\") pod \"ironic-operator-controller-manager-99b499f4-7j69z\" (UID: \"46528db3-6717-4abb-a779-33290ae0c986\") " pod="openstack-operators/ironic-operator-controller-manager-99b499f4-7j69z" Nov 22 10:54:05 crc 
kubenswrapper[4926]: I1122 10:54:05.970758 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dzbn4\" (UniqueName: \"kubernetes.io/projected/dc80ed79-7a34-4756-b5ed-0b3cda532910-kube-api-access-dzbn4\") pod \"infra-operator-controller-manager-7875d8bb94-pr7tn\" (UID: \"dc80ed79-7a34-4756-b5ed-0b3cda532910\") " pod="openstack-operators/infra-operator-controller-manager-7875d8bb94-pr7tn" Nov 22 10:54:05 crc kubenswrapper[4926]: I1122 10:54:05.970788 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g4jr6\" (UniqueName: \"kubernetes.io/projected/3ebbbdf8-da82-4f02-a8f5-509de3b56721-kube-api-access-g4jr6\") pod \"glance-operator-controller-manager-7969689c84-qshq6\" (UID: \"3ebbbdf8-da82-4f02-a8f5-509de3b56721\") " pod="openstack-operators/glance-operator-controller-manager-7969689c84-qshq6" Nov 22 10:54:05 crc kubenswrapper[4926]: I1122 10:54:05.970814 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7s26j\" (UniqueName: \"kubernetes.io/projected/f8406cda-67f4-425a-83f1-ab90cf4ebf0c-kube-api-access-7s26j\") pod \"barbican-operator-controller-manager-697c78f669-dfq9w\" (UID: \"f8406cda-67f4-425a-83f1-ab90cf4ebf0c\") " pod="openstack-operators/barbican-operator-controller-manager-697c78f669-dfq9w" Nov 22 10:54:05 crc kubenswrapper[4926]: I1122 10:54:05.970857 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lcctk\" (UniqueName: \"kubernetes.io/projected/02d4d3c4-4951-4f41-8605-239ac95dae92-kube-api-access-lcctk\") pod \"designate-operator-controller-manager-767ccfd65f-8p4kn\" (UID: \"02d4d3c4-4951-4f41-8605-239ac95dae92\") " pod="openstack-operators/designate-operator-controller-manager-767ccfd65f-8p4kn" Nov 22 10:54:05 crc kubenswrapper[4926]: I1122 10:54:05.970918 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-r79jw\" (UniqueName: \"kubernetes.io/projected/bf6721b8-a1f6-4d27-ad5a-c090e2dc8806-kube-api-access-r79jw\") pod \"heat-operator-controller-manager-7869d7c46b-np8cn\" (UID: \"bf6721b8-a1f6-4d27-ad5a-c090e2dc8806\") " pod="openstack-operators/heat-operator-controller-manager-7869d7c46b-np8cn" Nov 22 10:54:05 crc kubenswrapper[4926]: I1122 10:54:05.970957 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/dc80ed79-7a34-4756-b5ed-0b3cda532910-cert\") pod \"infra-operator-controller-manager-7875d8bb94-pr7tn\" (UID: \"dc80ed79-7a34-4756-b5ed-0b3cda532910\") " pod="openstack-operators/infra-operator-controller-manager-7875d8bb94-pr7tn" Nov 22 10:54:05 crc kubenswrapper[4926]: E1122 10:54:05.971091 4926 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found Nov 22 10:54:05 crc kubenswrapper[4926]: E1122 10:54:05.971149 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/dc80ed79-7a34-4756-b5ed-0b3cda532910-cert podName:dc80ed79-7a34-4756-b5ed-0b3cda532910 nodeName:}" failed. No retries permitted until 2025-11-22 10:54:06.471128497 +0000 UTC m=+866.772733784 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/dc80ed79-7a34-4756-b5ed-0b3cda532910-cert") pod "infra-operator-controller-manager-7875d8bb94-pr7tn" (UID: "dc80ed79-7a34-4756-b5ed-0b3cda532910") : secret "infra-operator-webhook-server-cert" not found Nov 22 10:54:05 crc kubenswrapper[4926]: I1122 10:54:05.977855 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/keystone-operator-controller-manager-7454b96578-dvdzj"] Nov 22 10:54:05 crc kubenswrapper[4926]: I1122 10:54:05.978799 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/keystone-operator-controller-manager-7454b96578-dvdzj" Nov 22 10:54:05 crc kubenswrapper[4926]: I1122 10:54:05.983067 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"keystone-operator-controller-manager-dockercfg-2kkzs" Nov 22 10:54:05 crc kubenswrapper[4926]: I1122 10:54:05.990735 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/keystone-operator-controller-manager-7454b96578-dvdzj"] Nov 22 10:54:05 crc kubenswrapper[4926]: I1122 10:54:05.997791 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lcctk\" (UniqueName: \"kubernetes.io/projected/02d4d3c4-4951-4f41-8605-239ac95dae92-kube-api-access-lcctk\") pod \"designate-operator-controller-manager-767ccfd65f-8p4kn\" (UID: \"02d4d3c4-4951-4f41-8605-239ac95dae92\") " pod="openstack-operators/designate-operator-controller-manager-767ccfd65f-8p4kn" Nov 22 10:54:06 crc kubenswrapper[4926]: I1122 10:54:06.000615 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/manila-operator-controller-manager-58f887965d-r2ctj"] Nov 22 10:54:06 crc kubenswrapper[4926]: I1122 10:54:06.002027 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/manila-operator-controller-manager-58f887965d-r2ctj" Nov 22 10:54:06 crc kubenswrapper[4926]: I1122 10:54:06.005317 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dzbn4\" (UniqueName: \"kubernetes.io/projected/dc80ed79-7a34-4756-b5ed-0b3cda532910-kube-api-access-dzbn4\") pod \"infra-operator-controller-manager-7875d8bb94-pr7tn\" (UID: \"dc80ed79-7a34-4756-b5ed-0b3cda532910\") " pod="openstack-operators/infra-operator-controller-manager-7875d8bb94-pr7tn" Nov 22 10:54:06 crc kubenswrapper[4926]: I1122 10:54:06.011852 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"manila-operator-controller-manager-dockercfg-656q9" Nov 22 10:54:06 crc kubenswrapper[4926]: I1122 10:54:06.013129 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-w74tl\" (UniqueName: \"kubernetes.io/projected/f3502c04-7310-4659-aa47-b91b71ff3b30-kube-api-access-w74tl\") pod \"horizon-operator-controller-manager-598f69df5d-hls4w\" (UID: \"f3502c04-7310-4659-aa47-b91b71ff3b30\") " pod="openstack-operators/horizon-operator-controller-manager-598f69df5d-hls4w" Nov 22 10:54:06 crc kubenswrapper[4926]: I1122 10:54:06.013204 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/mariadb-operator-controller-manager-5f449d8fbc-bfqxw"] Nov 22 10:54:06 crc kubenswrapper[4926]: I1122 10:54:06.014172 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/mariadb-operator-controller-manager-5f449d8fbc-bfqxw" Nov 22 10:54:06 crc kubenswrapper[4926]: I1122 10:54:06.018613 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7s26j\" (UniqueName: \"kubernetes.io/projected/f8406cda-67f4-425a-83f1-ab90cf4ebf0c-kube-api-access-7s26j\") pod \"barbican-operator-controller-manager-697c78f669-dfq9w\" (UID: \"f8406cda-67f4-425a-83f1-ab90cf4ebf0c\") " pod="openstack-operators/barbican-operator-controller-manager-697c78f669-dfq9w" Nov 22 10:54:06 crc kubenswrapper[4926]: I1122 10:54:06.019121 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g4jr6\" (UniqueName: \"kubernetes.io/projected/3ebbbdf8-da82-4f02-a8f5-509de3b56721-kube-api-access-g4jr6\") pod \"glance-operator-controller-manager-7969689c84-qshq6\" (UID: \"3ebbbdf8-da82-4f02-a8f5-509de3b56721\") " pod="openstack-operators/glance-operator-controller-manager-7969689c84-qshq6" Nov 22 10:54:06 crc kubenswrapper[4926]: I1122 10:54:06.019750 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"mariadb-operator-controller-manager-dockercfg-n4vj5" Nov 22 10:54:06 crc kubenswrapper[4926]: I1122 10:54:06.020494 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-26jb8\" (UniqueName: \"kubernetes.io/projected/644aaf3f-48c2-4789-9775-18ed3ae24fd7-kube-api-access-26jb8\") pod \"cinder-operator-controller-manager-6498cbf48f-cttxc\" (UID: \"644aaf3f-48c2-4789-9775-18ed3ae24fd7\") " pod="openstack-operators/cinder-operator-controller-manager-6498cbf48f-cttxc" Nov 22 10:54:06 crc kubenswrapper[4926]: I1122 10:54:06.030461 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/mariadb-operator-controller-manager-5f449d8fbc-bfqxw"] Nov 22 10:54:06 crc kubenswrapper[4926]: I1122 10:54:06.034278 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-r79jw\" (UniqueName: \"kubernetes.io/projected/bf6721b8-a1f6-4d27-ad5a-c090e2dc8806-kube-api-access-r79jw\") pod \"heat-operator-controller-manager-7869d7c46b-np8cn\" (UID: \"bf6721b8-a1f6-4d27-ad5a-c090e2dc8806\") " pod="openstack-operators/heat-operator-controller-manager-7869d7c46b-np8cn" Nov 22 10:54:06 crc kubenswrapper[4926]: I1122 10:54:06.035901 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/neutron-operator-controller-manager-669dc6ff5f-crkkv"] Nov 22 10:54:06 crc kubenswrapper[4926]: I1122 10:54:06.036793 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/neutron-operator-controller-manager-669dc6ff5f-crkkv" Nov 22 10:54:06 crc kubenswrapper[4926]: I1122 10:54:06.042074 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"neutron-operator-controller-manager-dockercfg-7tlgp" Nov 22 10:54:06 crc kubenswrapper[4926]: I1122 10:54:06.049749 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/manila-operator-controller-manager-58f887965d-r2ctj"] Nov 22 10:54:06 crc kubenswrapper[4926]: I1122 10:54:06.060193 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/barbican-operator-controller-manager-697c78f669-dfq9w" Nov 22 10:54:06 crc kubenswrapper[4926]: I1122 10:54:06.067947 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/nova-operator-controller-manager-cfbb9c588-zjkmb"] Nov 22 10:54:06 crc kubenswrapper[4926]: I1122 10:54:06.068925 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/nova-operator-controller-manager-cfbb9c588-zjkmb" Nov 22 10:54:06 crc kubenswrapper[4926]: I1122 10:54:06.070511 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"nova-operator-controller-manager-dockercfg-rnmfk" Nov 22 10:54:06 crc kubenswrapper[4926]: I1122 10:54:06.072338 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-k8j8p\" (UniqueName: \"kubernetes.io/projected/46528db3-6717-4abb-a779-33290ae0c986-kube-api-access-k8j8p\") pod \"ironic-operator-controller-manager-99b499f4-7j69z\" (UID: \"46528db3-6717-4abb-a779-33290ae0c986\") " pod="openstack-operators/ironic-operator-controller-manager-99b499f4-7j69z" Nov 22 10:54:06 crc kubenswrapper[4926]: I1122 10:54:06.072392 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4gqs8\" (UniqueName: \"kubernetes.io/projected/0996e99c-8565-426e-afa0-8a52ff2bee16-kube-api-access-4gqs8\") pod \"manila-operator-controller-manager-58f887965d-r2ctj\" (UID: \"0996e99c-8565-426e-afa0-8a52ff2bee16\") " pod="openstack-operators/manila-operator-controller-manager-58f887965d-r2ctj" Nov 22 10:54:06 crc kubenswrapper[4926]: I1122 10:54:06.072434 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6rwng\" (UniqueName: \"kubernetes.io/projected/7e28261c-db91-4143-a418-1114acf60dc0-kube-api-access-6rwng\") pod \"keystone-operator-controller-manager-7454b96578-dvdzj\" (UID: \"7e28261c-db91-4143-a418-1114acf60dc0\") " pod="openstack-operators/keystone-operator-controller-manager-7454b96578-dvdzj" Nov 22 10:54:06 crc kubenswrapper[4926]: I1122 10:54:06.072464 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tthp9\" (UniqueName: \"kubernetes.io/projected/72b66cb9-cb2a-4977-a3f1-3fe22508641e-kube-api-access-tthp9\") pod \"mariadb-operator-controller-manager-5f449d8fbc-bfqxw\" (UID: \"72b66cb9-cb2a-4977-a3f1-3fe22508641e\") " pod="openstack-operators/mariadb-operator-controller-manager-5f449d8fbc-bfqxw" Nov 22 10:54:06 crc kubenswrapper[4926]: I1122 10:54:06.072506 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wt4sf\" (UniqueName: \"kubernetes.io/projected/0126a31b-68bb-46a7-8f3a-f34ad5d74e6d-kube-api-access-wt4sf\") pod \"neutron-operator-controller-manager-669dc6ff5f-crkkv\" (UID: \"0126a31b-68bb-46a7-8f3a-f34ad5d74e6d\") " pod="openstack-operators/neutron-operator-controller-manager-669dc6ff5f-crkkv" Nov 22 10:54:06 crc kubenswrapper[4926]: I1122 10:54:06.079519 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/neutron-operator-controller-manager-669dc6ff5f-crkkv"] Nov 22 10:54:06 crc kubenswrapper[4926]: I1122 10:54:06.084675 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/cinder-operator-controller-manager-6498cbf48f-cttxc" Nov 22 10:54:06 crc kubenswrapper[4926]: I1122 10:54:06.093816 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/nova-operator-controller-manager-cfbb9c588-zjkmb"] Nov 22 10:54:06 crc kubenswrapper[4926]: I1122 10:54:06.104329 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/octavia-operator-controller-manager-54cfbf4c7d-c5z8p"] Nov 22 10:54:06 crc kubenswrapper[4926]: I1122 10:54:06.105317 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/octavia-operator-controller-manager-54cfbf4c7d-c5z8p" Nov 22 10:54:06 crc kubenswrapper[4926]: I1122 10:54:06.105721 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-k8j8p\" (UniqueName: \"kubernetes.io/projected/46528db3-6717-4abb-a779-33290ae0c986-kube-api-access-k8j8p\") pod \"ironic-operator-controller-manager-99b499f4-7j69z\" (UID: \"46528db3-6717-4abb-a779-33290ae0c986\") " pod="openstack-operators/ironic-operator-controller-manager-99b499f4-7j69z" Nov 22 10:54:06 crc kubenswrapper[4926]: I1122 10:54:06.116437 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"octavia-operator-controller-manager-dockercfg-dftrc" Nov 22 10:54:06 crc kubenswrapper[4926]: I1122 10:54:06.117340 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/octavia-operator-controller-manager-54cfbf4c7d-c5z8p"] Nov 22 10:54:06 crc kubenswrapper[4926]: I1122 10:54:06.120462 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/designate-operator-controller-manager-767ccfd65f-8p4kn" Nov 22 10:54:06 crc kubenswrapper[4926]: I1122 10:54:06.124143 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-baremetal-operator-controller-manager-7cfbb8b596qtsss"] Nov 22 10:54:06 crc kubenswrapper[4926]: I1122 10:54:06.125446 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-baremetal-operator-controller-manager-7cfbb8b596qtsss" Nov 22 10:54:06 crc kubenswrapper[4926]: I1122 10:54:06.130241 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/glance-operator-controller-manager-7969689c84-qshq6" Nov 22 10:54:06 crc kubenswrapper[4926]: I1122 10:54:06.133039 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-baremetal-operator-controller-manager-dockercfg-qk6xb" Nov 22 10:54:06 crc kubenswrapper[4926]: I1122 10:54:06.133228 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-baremetal-operator-webhook-server-cert" Nov 22 10:54:06 crc kubenswrapper[4926]: I1122 10:54:06.159136 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/heat-operator-controller-manager-7869d7c46b-np8cn" Nov 22 10:54:06 crc kubenswrapper[4926]: I1122 10:54:06.178736 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6rwng\" (UniqueName: \"kubernetes.io/projected/7e28261c-db91-4143-a418-1114acf60dc0-kube-api-access-6rwng\") pod \"keystone-operator-controller-manager-7454b96578-dvdzj\" (UID: \"7e28261c-db91-4143-a418-1114acf60dc0\") " pod="openstack-operators/keystone-operator-controller-manager-7454b96578-dvdzj" Nov 22 10:54:06 crc kubenswrapper[4926]: I1122 10:54:06.178906 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tthp9\" (UniqueName: \"kubernetes.io/projected/72b66cb9-cb2a-4977-a3f1-3fe22508641e-kube-api-access-tthp9\") pod \"mariadb-operator-controller-manager-5f449d8fbc-bfqxw\" (UID: \"72b66cb9-cb2a-4977-a3f1-3fe22508641e\") " pod="openstack-operators/mariadb-operator-controller-manager-5f449d8fbc-bfqxw" Nov 22 10:54:06 crc kubenswrapper[4926]: I1122 10:54:06.179076 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wt4sf\" (UniqueName: \"kubernetes.io/projected/0126a31b-68bb-46a7-8f3a-f34ad5d74e6d-kube-api-access-wt4sf\") pod \"neutron-operator-controller-manager-669dc6ff5f-crkkv\" (UID: \"0126a31b-68bb-46a7-8f3a-f34ad5d74e6d\") " pod="openstack-operators/neutron-operator-controller-manager-669dc6ff5f-crkkv" Nov 22 10:54:06 crc kubenswrapper[4926]: I1122 10:54:06.179161 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/4c6f9a58-d6f5-426f-bb8d-e019401a015a-cert\") pod \"openstack-baremetal-operator-controller-manager-7cfbb8b596qtsss\" (UID: \"4c6f9a58-d6f5-426f-bb8d-e019401a015a\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-7cfbb8b596qtsss" Nov 22 10:54:06 crc kubenswrapper[4926]: I1122 10:54:06.179230 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zxkbb\" (UniqueName: \"kubernetes.io/projected/4c6f9a58-d6f5-426f-bb8d-e019401a015a-kube-api-access-zxkbb\") pod \"openstack-baremetal-operator-controller-manager-7cfbb8b596qtsss\" (UID: \"4c6f9a58-d6f5-426f-bb8d-e019401a015a\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-7cfbb8b596qtsss" Nov 22 10:54:06 crc kubenswrapper[4926]: I1122 10:54:06.179343 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9k2rf\" (UniqueName: \"kubernetes.io/projected/71c1201e-62bb-4d32-945b-80cda1ff41ac-kube-api-access-9k2rf\") pod \"octavia-operator-controller-manager-54cfbf4c7d-c5z8p\" (UID: \"71c1201e-62bb-4d32-945b-80cda1ff41ac\") " pod="openstack-operators/octavia-operator-controller-manager-54cfbf4c7d-c5z8p" Nov 22 10:54:06 crc kubenswrapper[4926]: I1122 10:54:06.179421 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4gqs8\" (UniqueName: \"kubernetes.io/projected/0996e99c-8565-426e-afa0-8a52ff2bee16-kube-api-access-4gqs8\") pod \"manila-operator-controller-manager-58f887965d-r2ctj\" (UID: \"0996e99c-8565-426e-afa0-8a52ff2bee16\") " pod="openstack-operators/manila-operator-controller-manager-58f887965d-r2ctj" Nov 22 10:54:06 crc kubenswrapper[4926]: I1122 10:54:06.181029 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"kube-api-access-94w7d\" (UniqueName: \"kubernetes.io/projected/8b039ede-62fc-47ed-83ed-672e756887a1-kube-api-access-94w7d\") pod \"nova-operator-controller-manager-cfbb9c588-zjkmb\" (UID: \"8b039ede-62fc-47ed-83ed-672e756887a1\") " pod="openstack-operators/nova-operator-controller-manager-cfbb9c588-zjkmb" Nov 22 10:54:06 crc kubenswrapper[4926]: I1122 10:54:06.183365 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/horizon-operator-controller-manager-598f69df5d-hls4w" Nov 22 10:54:06 crc kubenswrapper[4926]: I1122 10:54:06.209780 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-baremetal-operator-controller-manager-7cfbb8b596qtsss"] Nov 22 10:54:06 crc kubenswrapper[4926]: I1122 10:54:06.220954 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wt4sf\" (UniqueName: \"kubernetes.io/projected/0126a31b-68bb-46a7-8f3a-f34ad5d74e6d-kube-api-access-wt4sf\") pod \"neutron-operator-controller-manager-669dc6ff5f-crkkv\" (UID: \"0126a31b-68bb-46a7-8f3a-f34ad5d74e6d\") " pod="openstack-operators/neutron-operator-controller-manager-669dc6ff5f-crkkv" Nov 22 10:54:06 crc kubenswrapper[4926]: I1122 10:54:06.221592 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tthp9\" (UniqueName: \"kubernetes.io/projected/72b66cb9-cb2a-4977-a3f1-3fe22508641e-kube-api-access-tthp9\") pod \"mariadb-operator-controller-manager-5f449d8fbc-bfqxw\" (UID: \"72b66cb9-cb2a-4977-a3f1-3fe22508641e\") " pod="openstack-operators/mariadb-operator-controller-manager-5f449d8fbc-bfqxw" Nov 22 10:54:06 crc kubenswrapper[4926]: I1122 10:54:06.224192 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4gqs8\" (UniqueName: \"kubernetes.io/projected/0996e99c-8565-426e-afa0-8a52ff2bee16-kube-api-access-4gqs8\") pod \"manila-operator-controller-manager-58f887965d-r2ctj\" (UID: \"0996e99c-8565-426e-afa0-8a52ff2bee16\") " pod="openstack-operators/manila-operator-controller-manager-58f887965d-r2ctj" Nov 22 10:54:06 crc kubenswrapper[4926]: I1122 10:54:06.229686 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/ironic-operator-controller-manager-99b499f4-7j69z" Nov 22 10:54:06 crc kubenswrapper[4926]: I1122 10:54:06.231394 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6rwng\" (UniqueName: \"kubernetes.io/projected/7e28261c-db91-4143-a418-1114acf60dc0-kube-api-access-6rwng\") pod \"keystone-operator-controller-manager-7454b96578-dvdzj\" (UID: \"7e28261c-db91-4143-a418-1114acf60dc0\") " pod="openstack-operators/keystone-operator-controller-manager-7454b96578-dvdzj" Nov 22 10:54:06 crc kubenswrapper[4926]: I1122 10:54:06.248523 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/placement-operator-controller-manager-5b797b8dff-pgzg9"] Nov 22 10:54:06 crc kubenswrapper[4926]: I1122 10:54:06.252679 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/neutron-operator-controller-manager-669dc6ff5f-crkkv" Nov 22 10:54:06 crc kubenswrapper[4926]: I1122 10:54:06.263566 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/placement-operator-controller-manager-5b797b8dff-pgzg9" Nov 22 10:54:06 crc kubenswrapper[4926]: I1122 10:54:06.265168 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"placement-operator-controller-manager-dockercfg-5bqdk" Nov 22 10:54:06 crc kubenswrapper[4926]: I1122 10:54:06.270000 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/ovn-operator-controller-manager-587df66445-2hwd8"] Nov 22 10:54:06 crc kubenswrapper[4926]: I1122 10:54:06.283610 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/ovn-operator-controller-manager-587df66445-2hwd8" Nov 22 10:54:06 crc kubenswrapper[4926]: I1122 10:54:06.284308 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gldn4\" (UniqueName: \"kubernetes.io/projected/ec00fa84-7dd0-46d6-b9f2-4a7b687b347b-kube-api-access-gldn4\") pod \"placement-operator-controller-manager-5b797b8dff-pgzg9\" (UID: \"ec00fa84-7dd0-46d6-b9f2-4a7b687b347b\") " pod="openstack-operators/placement-operator-controller-manager-5b797b8dff-pgzg9" Nov 22 10:54:06 crc kubenswrapper[4926]: I1122 10:54:06.284361 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/4c6f9a58-d6f5-426f-bb8d-e019401a015a-cert\") pod \"openstack-baremetal-operator-controller-manager-7cfbb8b596qtsss\" (UID: \"4c6f9a58-d6f5-426f-bb8d-e019401a015a\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-7cfbb8b596qtsss" Nov 22 10:54:06 crc kubenswrapper[4926]: I1122 10:54:06.284384 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zxkbb\" (UniqueName: \"kubernetes.io/projected/4c6f9a58-d6f5-426f-bb8d-e019401a015a-kube-api-access-zxkbb\") pod \"openstack-baremetal-operator-controller-manager-7cfbb8b596qtsss\" (UID: \"4c6f9a58-d6f5-426f-bb8d-e019401a015a\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-7cfbb8b596qtsss" Nov 22 10:54:06 crc kubenswrapper[4926]: I1122 10:54:06.284413 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9k2rf\" (UniqueName: \"kubernetes.io/projected/71c1201e-62bb-4d32-945b-80cda1ff41ac-kube-api-access-9k2rf\") pod \"octavia-operator-controller-manager-54cfbf4c7d-c5z8p\" (UID: \"71c1201e-62bb-4d32-945b-80cda1ff41ac\") " pod="openstack-operators/octavia-operator-controller-manager-54cfbf4c7d-c5z8p" Nov 22 10:54:06 crc kubenswrapper[4926]: I1122 10:54:06.284462 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-94w7d\" (UniqueName: \"kubernetes.io/projected/8b039ede-62fc-47ed-83ed-672e756887a1-kube-api-access-94w7d\") pod \"nova-operator-controller-manager-cfbb9c588-zjkmb\" (UID: \"8b039ede-62fc-47ed-83ed-672e756887a1\") " pod="openstack-operators/nova-operator-controller-manager-cfbb9c588-zjkmb" Nov 22 10:54:06 crc kubenswrapper[4926]: I1122 10:54:06.286109 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"ovn-operator-controller-manager-dockercfg-jdm5d" Nov 22 10:54:06 crc kubenswrapper[4926]: E1122 10:54:06.286356 4926 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found Nov 22 10:54:06 crc kubenswrapper[4926]: E1122 10:54:06.286403 
4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/4c6f9a58-d6f5-426f-bb8d-e019401a015a-cert podName:4c6f9a58-d6f5-426f-bb8d-e019401a015a nodeName:}" failed. No retries permitted until 2025-11-22 10:54:06.786383463 +0000 UTC m=+867.087988750 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/4c6f9a58-d6f5-426f-bb8d-e019401a015a-cert") pod "openstack-baremetal-operator-controller-manager-7cfbb8b596qtsss" (UID: "4c6f9a58-d6f5-426f-bb8d-e019401a015a") : secret "openstack-baremetal-operator-webhook-server-cert" not found Nov 22 10:54:06 crc kubenswrapper[4926]: I1122 10:54:06.304379 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/placement-operator-controller-manager-5b797b8dff-pgzg9"] Nov 22 10:54:06 crc kubenswrapper[4926]: I1122 10:54:06.318670 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-94w7d\" (UniqueName: \"kubernetes.io/projected/8b039ede-62fc-47ed-83ed-672e756887a1-kube-api-access-94w7d\") pod \"nova-operator-controller-manager-cfbb9c588-zjkmb\" (UID: \"8b039ede-62fc-47ed-83ed-672e756887a1\") " pod="openstack-operators/nova-operator-controller-manager-cfbb9c588-zjkmb" Nov 22 10:54:06 crc kubenswrapper[4926]: I1122 10:54:06.319605 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zxkbb\" (UniqueName: \"kubernetes.io/projected/4c6f9a58-d6f5-426f-bb8d-e019401a015a-kube-api-access-zxkbb\") pod \"openstack-baremetal-operator-controller-manager-7cfbb8b596qtsss\" (UID: \"4c6f9a58-d6f5-426f-bb8d-e019401a015a\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-7cfbb8b596qtsss" Nov 22 10:54:06 crc kubenswrapper[4926]: I1122 10:54:06.325548 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ovn-operator-controller-manager-587df66445-2hwd8"] Nov 22 10:54:06 crc kubenswrapper[4926]: I1122 10:54:06.346582 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/swift-operator-controller-manager-d656998f4-j7xg8"] Nov 22 10:54:06 crc kubenswrapper[4926]: I1122 10:54:06.346965 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9k2rf\" (UniqueName: \"kubernetes.io/projected/71c1201e-62bb-4d32-945b-80cda1ff41ac-kube-api-access-9k2rf\") pod \"octavia-operator-controller-manager-54cfbf4c7d-c5z8p\" (UID: \"71c1201e-62bb-4d32-945b-80cda1ff41ac\") " pod="openstack-operators/octavia-operator-controller-manager-54cfbf4c7d-c5z8p" Nov 22 10:54:06 crc kubenswrapper[4926]: I1122 10:54:06.348951 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/swift-operator-controller-manager-d656998f4-j7xg8" Nov 22 10:54:06 crc kubenswrapper[4926]: I1122 10:54:06.351437 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"swift-operator-controller-manager-dockercfg-cjgjd" Nov 22 10:54:06 crc kubenswrapper[4926]: I1122 10:54:06.361598 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/swift-operator-controller-manager-d656998f4-j7xg8"] Nov 22 10:54:06 crc kubenswrapper[4926]: I1122 10:54:06.374597 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/telemetry-operator-controller-manager-6d4bf84b58-95jv5"] Nov 22 10:54:06 crc kubenswrapper[4926]: I1122 10:54:06.376320 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/telemetry-operator-controller-manager-6d4bf84b58-95jv5" Nov 22 10:54:06 crc kubenswrapper[4926]: I1122 10:54:06.378818 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"telemetry-operator-controller-manager-dockercfg-vkjqb" Nov 22 10:54:06 crc kubenswrapper[4926]: I1122 10:54:06.382816 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/telemetry-operator-controller-manager-6d4bf84b58-95jv5"] Nov 22 10:54:06 crc kubenswrapper[4926]: I1122 10:54:06.387560 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gldn4\" (UniqueName: \"kubernetes.io/projected/ec00fa84-7dd0-46d6-b9f2-4a7b687b347b-kube-api-access-gldn4\") pod \"placement-operator-controller-manager-5b797b8dff-pgzg9\" (UID: \"ec00fa84-7dd0-46d6-b9f2-4a7b687b347b\") " pod="openstack-operators/placement-operator-controller-manager-5b797b8dff-pgzg9" Nov 22 10:54:06 crc kubenswrapper[4926]: I1122 10:54:06.387605 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rmvx7\" (UniqueName: \"kubernetes.io/projected/355d4b1d-9137-4cf5-aac8-e373d1b7d696-kube-api-access-rmvx7\") pod \"ovn-operator-controller-manager-587df66445-2hwd8\" (UID: \"355d4b1d-9137-4cf5-aac8-e373d1b7d696\") " pod="openstack-operators/ovn-operator-controller-manager-587df66445-2hwd8" Nov 22 10:54:06 crc kubenswrapper[4926]: I1122 10:54:06.387677 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jfblp\" (UniqueName: \"kubernetes.io/projected/e30ebbd3-daab-4ee4-acea-631c15b5045b-kube-api-access-jfblp\") pod \"swift-operator-controller-manager-d656998f4-j7xg8\" (UID: \"e30ebbd3-daab-4ee4-acea-631c15b5045b\") " pod="openstack-operators/swift-operator-controller-manager-d656998f4-j7xg8" Nov 22 10:54:06 crc kubenswrapper[4926]: I1122 10:54:06.396482 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/keystone-operator-controller-manager-7454b96578-dvdzj" Nov 22 10:54:06 crc kubenswrapper[4926]: I1122 10:54:06.431586 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gldn4\" (UniqueName: \"kubernetes.io/projected/ec00fa84-7dd0-46d6-b9f2-4a7b687b347b-kube-api-access-gldn4\") pod \"placement-operator-controller-manager-5b797b8dff-pgzg9\" (UID: \"ec00fa84-7dd0-46d6-b9f2-4a7b687b347b\") " pod="openstack-operators/placement-operator-controller-manager-5b797b8dff-pgzg9" Nov 22 10:54:06 crc kubenswrapper[4926]: I1122 10:54:06.485823 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/manila-operator-controller-manager-58f887965d-r2ctj" Nov 22 10:54:06 crc kubenswrapper[4926]: I1122 10:54:06.490729 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/dc80ed79-7a34-4756-b5ed-0b3cda532910-cert\") pod \"infra-operator-controller-manager-7875d8bb94-pr7tn\" (UID: \"dc80ed79-7a34-4756-b5ed-0b3cda532910\") " pod="openstack-operators/infra-operator-controller-manager-7875d8bb94-pr7tn" Nov 22 10:54:06 crc kubenswrapper[4926]: I1122 10:54:06.490803 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rmvx7\" (UniqueName: \"kubernetes.io/projected/355d4b1d-9137-4cf5-aac8-e373d1b7d696-kube-api-access-rmvx7\") pod \"ovn-operator-controller-manager-587df66445-2hwd8\" (UID: \"355d4b1d-9137-4cf5-aac8-e373d1b7d696\") " pod="openstack-operators/ovn-operator-controller-manager-587df66445-2hwd8" Nov 22 10:54:06 crc kubenswrapper[4926]: I1122 10:54:06.490875 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jfblp\" (UniqueName: \"kubernetes.io/projected/e30ebbd3-daab-4ee4-acea-631c15b5045b-kube-api-access-jfblp\") pod \"swift-operator-controller-manager-d656998f4-j7xg8\" (UID: \"e30ebbd3-daab-4ee4-acea-631c15b5045b\") " pod="openstack-operators/swift-operator-controller-manager-d656998f4-j7xg8" Nov 22 10:54:06 crc kubenswrapper[4926]: I1122 10:54:06.490959 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kj4np\" (UniqueName: \"kubernetes.io/projected/6c866ac0-e106-4a90-a223-435b244634b5-kube-api-access-kj4np\") pod \"telemetry-operator-controller-manager-6d4bf84b58-95jv5\" (UID: \"6c866ac0-e106-4a90-a223-435b244634b5\") " pod="openstack-operators/telemetry-operator-controller-manager-6d4bf84b58-95jv5" Nov 22 10:54:06 crc kubenswrapper[4926]: I1122 10:54:06.494158 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/dc80ed79-7a34-4756-b5ed-0b3cda532910-cert\") pod \"infra-operator-controller-manager-7875d8bb94-pr7tn\" (UID: \"dc80ed79-7a34-4756-b5ed-0b3cda532910\") " pod="openstack-operators/infra-operator-controller-manager-7875d8bb94-pr7tn" Nov 22 10:54:06 crc kubenswrapper[4926]: I1122 10:54:06.506852 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/infra-operator-controller-manager-7875d8bb94-pr7tn" Nov 22 10:54:06 crc kubenswrapper[4926]: I1122 10:54:06.511230 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/mariadb-operator-controller-manager-5f449d8fbc-bfqxw" Nov 22 10:54:06 crc kubenswrapper[4926]: I1122 10:54:06.524923 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rmvx7\" (UniqueName: \"kubernetes.io/projected/355d4b1d-9137-4cf5-aac8-e373d1b7d696-kube-api-access-rmvx7\") pod \"ovn-operator-controller-manager-587df66445-2hwd8\" (UID: \"355d4b1d-9137-4cf5-aac8-e373d1b7d696\") " pod="openstack-operators/ovn-operator-controller-manager-587df66445-2hwd8" Nov 22 10:54:06 crc kubenswrapper[4926]: I1122 10:54:06.526003 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jfblp\" (UniqueName: \"kubernetes.io/projected/e30ebbd3-daab-4ee4-acea-631c15b5045b-kube-api-access-jfblp\") pod \"swift-operator-controller-manager-d656998f4-j7xg8\" (UID: \"e30ebbd3-daab-4ee4-acea-631c15b5045b\") " pod="openstack-operators/swift-operator-controller-manager-d656998f4-j7xg8" Nov 22 10:54:06 crc kubenswrapper[4926]: I1122 10:54:06.530789 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/test-operator-controller-manager-6f44bf845f-7vhg5"] Nov 22 10:54:06 crc kubenswrapper[4926]: I1122 10:54:06.532179 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/test-operator-controller-manager-6f44bf845f-7vhg5" Nov 22 10:54:06 crc kubenswrapper[4926]: I1122 10:54:06.545641 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"test-operator-controller-manager-dockercfg-wtzjm" Nov 22 10:54:06 crc kubenswrapper[4926]: I1122 10:54:06.566875 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/nova-operator-controller-manager-cfbb9c588-zjkmb" Nov 22 10:54:06 crc kubenswrapper[4926]: I1122 10:54:06.591868 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kj4np\" (UniqueName: \"kubernetes.io/projected/6c866ac0-e106-4a90-a223-435b244634b5-kube-api-access-kj4np\") pod \"telemetry-operator-controller-manager-6d4bf84b58-95jv5\" (UID: \"6c866ac0-e106-4a90-a223-435b244634b5\") " pod="openstack-operators/telemetry-operator-controller-manager-6d4bf84b58-95jv5" Nov 22 10:54:06 crc kubenswrapper[4926]: I1122 10:54:06.597561 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tz5ds\" (UniqueName: \"kubernetes.io/projected/dab1442d-6ad4-4d03-b520-a12d7a4d6c9d-kube-api-access-tz5ds\") pod \"test-operator-controller-manager-6f44bf845f-7vhg5\" (UID: \"dab1442d-6ad4-4d03-b520-a12d7a4d6c9d\") " pod="openstack-operators/test-operator-controller-manager-6f44bf845f-7vhg5" Nov 22 10:54:06 crc kubenswrapper[4926]: I1122 10:54:06.601598 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-jbrrc" podUID="6307ac6a-e49c-46f5-858a-b207f0b61910" containerName="registry-server" containerID="cri-o://675ff17a73e3eb56f159869d655f1042acf8ef82a410907e6ae813c80c454843" gracePeriod=2 Nov 22 10:54:06 crc kubenswrapper[4926]: I1122 10:54:06.616986 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/octavia-operator-controller-manager-54cfbf4c7d-c5z8p" Nov 22 10:54:06 crc kubenswrapper[4926]: I1122 10:54:06.619580 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/test-operator-controller-manager-6f44bf845f-7vhg5"] Nov 22 10:54:06 crc kubenswrapper[4926]: I1122 10:54:06.646599 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kj4np\" (UniqueName: \"kubernetes.io/projected/6c866ac0-e106-4a90-a223-435b244634b5-kube-api-access-kj4np\") pod \"telemetry-operator-controller-manager-6d4bf84b58-95jv5\" (UID: \"6c866ac0-e106-4a90-a223-435b244634b5\") " pod="openstack-operators/telemetry-operator-controller-manager-6d4bf84b58-95jv5" Nov 22 10:54:06 crc kubenswrapper[4926]: I1122 10:54:06.663595 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/placement-operator-controller-manager-5b797b8dff-pgzg9" Nov 22 10:54:06 crc kubenswrapper[4926]: I1122 10:54:06.672988 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/watcher-operator-controller-manager-85494d54fc-czf4h"] Nov 22 10:54:06 crc kubenswrapper[4926]: I1122 10:54:06.694993 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/watcher-operator-controller-manager-85494d54fc-czf4h" Nov 22 10:54:06 crc kubenswrapper[4926]: I1122 10:54:06.687217 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/swift-operator-controller-manager-d656998f4-j7xg8" Nov 22 10:54:06 crc kubenswrapper[4926]: I1122 10:54:06.675185 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/ovn-operator-controller-manager-587df66445-2hwd8" Nov 22 10:54:06 crc kubenswrapper[4926]: I1122 10:54:06.702149 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/telemetry-operator-controller-manager-6d4bf84b58-95jv5" Nov 22 10:54:06 crc kubenswrapper[4926]: I1122 10:54:06.710277 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"watcher-operator-controller-manager-dockercfg-7cn4r" Nov 22 10:54:06 crc kubenswrapper[4926]: I1122 10:54:06.717743 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tz5ds\" (UniqueName: \"kubernetes.io/projected/dab1442d-6ad4-4d03-b520-a12d7a4d6c9d-kube-api-access-tz5ds\") pod \"test-operator-controller-manager-6f44bf845f-7vhg5\" (UID: \"dab1442d-6ad4-4d03-b520-a12d7a4d6c9d\") " pod="openstack-operators/test-operator-controller-manager-6f44bf845f-7vhg5" Nov 22 10:54:06 crc kubenswrapper[4926]: I1122 10:54:06.728778 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/watcher-operator-controller-manager-85494d54fc-czf4h"] Nov 22 10:54:06 crc kubenswrapper[4926]: I1122 10:54:06.772575 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tz5ds\" (UniqueName: \"kubernetes.io/projected/dab1442d-6ad4-4d03-b520-a12d7a4d6c9d-kube-api-access-tz5ds\") pod \"test-operator-controller-manager-6f44bf845f-7vhg5\" (UID: \"dab1442d-6ad4-4d03-b520-a12d7a4d6c9d\") " pod="openstack-operators/test-operator-controller-manager-6f44bf845f-7vhg5" Nov 22 10:54:06 crc kubenswrapper[4926]: I1122 10:54:06.781992 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-controller-manager-64844fbb8-hngj4"] Nov 22 10:54:06 crc kubenswrapper[4926]: I1122 10:54:06.783087 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-controller-manager-64844fbb8-hngj4" Nov 22 10:54:06 crc kubenswrapper[4926]: I1122 10:54:06.787569 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-controller-manager-dockercfg-pppzb" Nov 22 10:54:06 crc kubenswrapper[4926]: I1122 10:54:06.787749 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"webhook-server-cert" Nov 22 10:54:06 crc kubenswrapper[4926]: I1122 10:54:06.798292 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-manager-64844fbb8-hngj4"] Nov 22 10:54:06 crc kubenswrapper[4926]: I1122 10:54:06.821193 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/barbican-operator-controller-manager-697c78f669-dfq9w"] Nov 22 10:54:06 crc kubenswrapper[4926]: I1122 10:54:06.824921 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-manager-5f97d8c699-jdqsq"] Nov 22 10:54:06 crc kubenswrapper[4926]: I1122 10:54:06.825938 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/rabbitmq-cluster-operator-manager-5f97d8c699-jdqsq" Nov 22 10:54:06 crc kubenswrapper[4926]: I1122 10:54:06.829275 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"rabbitmq-cluster-operator-controller-manager-dockercfg-p2rwz" Nov 22 10:54:06 crc kubenswrapper[4926]: I1122 10:54:06.830232 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/4c6f9a58-d6f5-426f-bb8d-e019401a015a-cert\") pod \"openstack-baremetal-operator-controller-manager-7cfbb8b596qtsss\" (UID: \"4c6f9a58-d6f5-426f-bb8d-e019401a015a\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-7cfbb8b596qtsss" Nov 22 10:54:06 crc kubenswrapper[4926]: I1122 10:54:06.830417 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qdpb2\" (UniqueName: \"kubernetes.io/projected/3947549a-e067-4135-ba36-1e2663db15c0-kube-api-access-qdpb2\") pod \"watcher-operator-controller-manager-85494d54fc-czf4h\" (UID: \"3947549a-e067-4135-ba36-1e2663db15c0\") " pod="openstack-operators/watcher-operator-controller-manager-85494d54fc-czf4h" Nov 22 10:54:06 crc kubenswrapper[4926]: E1122 10:54:06.834140 4926 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found Nov 22 10:54:06 crc kubenswrapper[4926]: E1122 10:54:06.834210 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/4c6f9a58-d6f5-426f-bb8d-e019401a015a-cert podName:4c6f9a58-d6f5-426f-bb8d-e019401a015a nodeName:}" failed. No retries permitted until 2025-11-22 10:54:07.834194896 +0000 UTC m=+868.135800173 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/4c6f9a58-d6f5-426f-bb8d-e019401a015a-cert") pod "openstack-baremetal-operator-controller-manager-7cfbb8b596qtsss" (UID: "4c6f9a58-d6f5-426f-bb8d-e019401a015a") : secret "openstack-baremetal-operator-webhook-server-cert" not found Nov 22 10:54:06 crc kubenswrapper[4926]: I1122 10:54:06.836077 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-manager-5f97d8c699-jdqsq"] Nov 22 10:54:06 crc kubenswrapper[4926]: I1122 10:54:06.874812 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/test-operator-controller-manager-6f44bf845f-7vhg5" Nov 22 10:54:06 crc kubenswrapper[4926]: I1122 10:54:06.932588 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qdpb2\" (UniqueName: \"kubernetes.io/projected/3947549a-e067-4135-ba36-1e2663db15c0-kube-api-access-qdpb2\") pod \"watcher-operator-controller-manager-85494d54fc-czf4h\" (UID: \"3947549a-e067-4135-ba36-1e2663db15c0\") " pod="openstack-operators/watcher-operator-controller-manager-85494d54fc-czf4h" Nov 22 10:54:06 crc kubenswrapper[4926]: I1122 10:54:06.932685 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-db6j8\" (UniqueName: \"kubernetes.io/projected/d292d5fa-12ea-40d0-a6df-1f6e9f5c8059-kube-api-access-db6j8\") pod \"openstack-operator-controller-manager-64844fbb8-hngj4\" (UID: \"d292d5fa-12ea-40d0-a6df-1f6e9f5c8059\") " pod="openstack-operators/openstack-operator-controller-manager-64844fbb8-hngj4" Nov 22 10:54:06 crc kubenswrapper[4926]: I1122 10:54:06.932724 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hbbq5\" (UniqueName: \"kubernetes.io/projected/3ab27f1b-e328-46d1-b9e5-b29e2caedef6-kube-api-access-hbbq5\") pod \"rabbitmq-cluster-operator-manager-5f97d8c699-jdqsq\" (UID: \"3ab27f1b-e328-46d1-b9e5-b29e2caedef6\") " pod="openstack-operators/rabbitmq-cluster-operator-manager-5f97d8c699-jdqsq" Nov 22 10:54:06 crc kubenswrapper[4926]: I1122 10:54:06.932796 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/d292d5fa-12ea-40d0-a6df-1f6e9f5c8059-cert\") pod \"openstack-operator-controller-manager-64844fbb8-hngj4\" (UID: \"d292d5fa-12ea-40d0-a6df-1f6e9f5c8059\") " pod="openstack-operators/openstack-operator-controller-manager-64844fbb8-hngj4" Nov 22 10:54:06 crc kubenswrapper[4926]: I1122 10:54:06.965704 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qdpb2\" (UniqueName: \"kubernetes.io/projected/3947549a-e067-4135-ba36-1e2663db15c0-kube-api-access-qdpb2\") pod \"watcher-operator-controller-manager-85494d54fc-czf4h\" (UID: \"3947549a-e067-4135-ba36-1e2663db15c0\") " pod="openstack-operators/watcher-operator-controller-manager-85494d54fc-czf4h" Nov 22 10:54:07 crc kubenswrapper[4926]: I1122 10:54:07.033646 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hbbq5\" (UniqueName: \"kubernetes.io/projected/3ab27f1b-e328-46d1-b9e5-b29e2caedef6-kube-api-access-hbbq5\") pod \"rabbitmq-cluster-operator-manager-5f97d8c699-jdqsq\" (UID: \"3ab27f1b-e328-46d1-b9e5-b29e2caedef6\") " pod="openstack-operators/rabbitmq-cluster-operator-manager-5f97d8c699-jdqsq" Nov 22 10:54:07 crc kubenswrapper[4926]: I1122 10:54:07.034113 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/d292d5fa-12ea-40d0-a6df-1f6e9f5c8059-cert\") pod \"openstack-operator-controller-manager-64844fbb8-hngj4\" (UID: \"d292d5fa-12ea-40d0-a6df-1f6e9f5c8059\") " pod="openstack-operators/openstack-operator-controller-manager-64844fbb8-hngj4" Nov 22 10:54:07 crc kubenswrapper[4926]: I1122 10:54:07.034206 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-db6j8\" (UniqueName: 
\"kubernetes.io/projected/d292d5fa-12ea-40d0-a6df-1f6e9f5c8059-kube-api-access-db6j8\") pod \"openstack-operator-controller-manager-64844fbb8-hngj4\" (UID: \"d292d5fa-12ea-40d0-a6df-1f6e9f5c8059\") " pod="openstack-operators/openstack-operator-controller-manager-64844fbb8-hngj4" Nov 22 10:54:07 crc kubenswrapper[4926]: I1122 10:54:07.039866 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/d292d5fa-12ea-40d0-a6df-1f6e9f5c8059-cert\") pod \"openstack-operator-controller-manager-64844fbb8-hngj4\" (UID: \"d292d5fa-12ea-40d0-a6df-1f6e9f5c8059\") " pod="openstack-operators/openstack-operator-controller-manager-64844fbb8-hngj4" Nov 22 10:54:07 crc kubenswrapper[4926]: I1122 10:54:07.051106 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hbbq5\" (UniqueName: \"kubernetes.io/projected/3ab27f1b-e328-46d1-b9e5-b29e2caedef6-kube-api-access-hbbq5\") pod \"rabbitmq-cluster-operator-manager-5f97d8c699-jdqsq\" (UID: \"3ab27f1b-e328-46d1-b9e5-b29e2caedef6\") " pod="openstack-operators/rabbitmq-cluster-operator-manager-5f97d8c699-jdqsq" Nov 22 10:54:07 crc kubenswrapper[4926]: I1122 10:54:07.053165 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-db6j8\" (UniqueName: \"kubernetes.io/projected/d292d5fa-12ea-40d0-a6df-1f6e9f5c8059-kube-api-access-db6j8\") pod \"openstack-operator-controller-manager-64844fbb8-hngj4\" (UID: \"d292d5fa-12ea-40d0-a6df-1f6e9f5c8059\") " pod="openstack-operators/openstack-operator-controller-manager-64844fbb8-hngj4" Nov 22 10:54:07 crc kubenswrapper[4926]: I1122 10:54:07.103927 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/watcher-operator-controller-manager-85494d54fc-czf4h" Nov 22 10:54:07 crc kubenswrapper[4926]: I1122 10:54:07.123587 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-controller-manager-64844fbb8-hngj4" Nov 22 10:54:07 crc kubenswrapper[4926]: I1122 10:54:07.156437 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/rabbitmq-cluster-operator-manager-5f97d8c699-jdqsq" Nov 22 10:54:07 crc kubenswrapper[4926]: I1122 10:54:07.290087 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/designate-operator-controller-manager-767ccfd65f-8p4kn"] Nov 22 10:54:07 crc kubenswrapper[4926]: I1122 10:54:07.294826 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/cinder-operator-controller-manager-6498cbf48f-cttxc"] Nov 22 10:54:07 crc kubenswrapper[4926]: I1122 10:54:07.354789 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/heat-operator-controller-manager-7869d7c46b-np8cn"] Nov 22 10:54:07 crc kubenswrapper[4926]: I1122 10:54:07.363420 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ironic-operator-controller-manager-99b499f4-7j69z"] Nov 22 10:54:07 crc kubenswrapper[4926]: I1122 10:54:07.413365 4926 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-jbrrc" Nov 22 10:54:07 crc kubenswrapper[4926]: I1122 10:54:07.540662 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6307ac6a-e49c-46f5-858a-b207f0b61910-utilities\") pod \"6307ac6a-e49c-46f5-858a-b207f0b61910\" (UID: \"6307ac6a-e49c-46f5-858a-b207f0b61910\") " Nov 22 10:54:07 crc kubenswrapper[4926]: I1122 10:54:07.540810 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ttsjc\" (UniqueName: \"kubernetes.io/projected/6307ac6a-e49c-46f5-858a-b207f0b61910-kube-api-access-ttsjc\") pod \"6307ac6a-e49c-46f5-858a-b207f0b61910\" (UID: \"6307ac6a-e49c-46f5-858a-b207f0b61910\") " Nov 22 10:54:07 crc kubenswrapper[4926]: I1122 10:54:07.540855 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6307ac6a-e49c-46f5-858a-b207f0b61910-catalog-content\") pod \"6307ac6a-e49c-46f5-858a-b207f0b61910\" (UID: \"6307ac6a-e49c-46f5-858a-b207f0b61910\") " Nov 22 10:54:07 crc kubenswrapper[4926]: I1122 10:54:07.542502 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6307ac6a-e49c-46f5-858a-b207f0b61910-utilities" (OuterVolumeSpecName: "utilities") pod "6307ac6a-e49c-46f5-858a-b207f0b61910" (UID: "6307ac6a-e49c-46f5-858a-b207f0b61910"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 10:54:07 crc kubenswrapper[4926]: I1122 10:54:07.550804 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6307ac6a-e49c-46f5-858a-b207f0b61910-kube-api-access-ttsjc" (OuterVolumeSpecName: "kube-api-access-ttsjc") pod "6307ac6a-e49c-46f5-858a-b207f0b61910" (UID: "6307ac6a-e49c-46f5-858a-b207f0b61910"). InnerVolumeSpecName "kube-api-access-ttsjc". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:54:07 crc kubenswrapper[4926]: I1122 10:54:07.557594 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6307ac6a-e49c-46f5-858a-b207f0b61910-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "6307ac6a-e49c-46f5-858a-b207f0b61910" (UID: "6307ac6a-e49c-46f5-858a-b207f0b61910"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 10:54:07 crc kubenswrapper[4926]: I1122 10:54:07.621778 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/designate-operator-controller-manager-767ccfd65f-8p4kn" event={"ID":"02d4d3c4-4951-4f41-8605-239ac95dae92","Type":"ContainerStarted","Data":"ad4ad2b468e04223e9a8ac6d2c91201aa22eeda861d0fcfbefd586397956e6f3"} Nov 22 10:54:07 crc kubenswrapper[4926]: I1122 10:54:07.632363 4926 generic.go:334] "Generic (PLEG): container finished" podID="6307ac6a-e49c-46f5-858a-b207f0b61910" containerID="675ff17a73e3eb56f159869d655f1042acf8ef82a410907e6ae813c80c454843" exitCode=0 Nov 22 10:54:07 crc kubenswrapper[4926]: I1122 10:54:07.632477 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-jbrrc" event={"ID":"6307ac6a-e49c-46f5-858a-b207f0b61910","Type":"ContainerDied","Data":"675ff17a73e3eb56f159869d655f1042acf8ef82a410907e6ae813c80c454843"} Nov 22 10:54:07 crc kubenswrapper[4926]: I1122 10:54:07.632484 4926 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-jbrrc" Nov 22 10:54:07 crc kubenswrapper[4926]: I1122 10:54:07.632505 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-jbrrc" event={"ID":"6307ac6a-e49c-46f5-858a-b207f0b61910","Type":"ContainerDied","Data":"a543444c1f06fd651e081443a222673f941aa8ea7d238b7d685d19a9aa1668af"} Nov 22 10:54:07 crc kubenswrapper[4926]: I1122 10:54:07.632523 4926 scope.go:117] "RemoveContainer" containerID="675ff17a73e3eb56f159869d655f1042acf8ef82a410907e6ae813c80c454843" Nov 22 10:54:07 crc kubenswrapper[4926]: I1122 10:54:07.638753 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/barbican-operator-controller-manager-697c78f669-dfq9w" event={"ID":"f8406cda-67f4-425a-83f1-ab90cf4ebf0c","Type":"ContainerStarted","Data":"0d8f3af8dd4b2d541716bc70dacd68ace2bbde139a5a83b2ce1c3225d0f7daad"} Nov 22 10:54:07 crc kubenswrapper[4926]: I1122 10:54:07.641940 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/cinder-operator-controller-manager-6498cbf48f-cttxc" event={"ID":"644aaf3f-48c2-4789-9775-18ed3ae24fd7","Type":"ContainerStarted","Data":"f798611f99f156dd23964e76cc07baa97955b034fda6b7c6b20684c9e8542503"} Nov 22 10:54:07 crc kubenswrapper[4926]: I1122 10:54:07.642072 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ttsjc\" (UniqueName: \"kubernetes.io/projected/6307ac6a-e49c-46f5-858a-b207f0b61910-kube-api-access-ttsjc\") on node \"crc\" DevicePath \"\"" Nov 22 10:54:07 crc kubenswrapper[4926]: I1122 10:54:07.642093 4926 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6307ac6a-e49c-46f5-858a-b207f0b61910-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 22 10:54:07 crc kubenswrapper[4926]: I1122 10:54:07.642105 4926 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6307ac6a-e49c-46f5-858a-b207f0b61910-utilities\") on node \"crc\" DevicePath \"\"" Nov 22 10:54:07 crc kubenswrapper[4926]: I1122 10:54:07.643290 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/heat-operator-controller-manager-7869d7c46b-np8cn" event={"ID":"bf6721b8-a1f6-4d27-ad5a-c090e2dc8806","Type":"ContainerStarted","Data":"5ed52892de5a31dd453f421312e1d07804abc9c0e11434f91d3ca4a1db925c27"} Nov 22 10:54:07 crc kubenswrapper[4926]: I1122 10:54:07.645988 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ironic-operator-controller-manager-99b499f4-7j69z" event={"ID":"46528db3-6717-4abb-a779-33290ae0c986","Type":"ContainerStarted","Data":"bf77da3756b08f4b182bf6939ae1c2d86116d560da919293ae4f10db60940f31"} Nov 22 10:54:07 crc kubenswrapper[4926]: I1122 10:54:07.665386 4926 scope.go:117] "RemoveContainer" containerID="6b28b3335dbe8dcfe436cf102a88bdd887bc9ca114f850bee94aa1954b61fa7d" Nov 22 10:54:07 crc kubenswrapper[4926]: I1122 10:54:07.667654 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-jbrrc"] Nov 22 10:54:07 crc kubenswrapper[4926]: I1122 10:54:07.673207 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-jbrrc"] Nov 22 10:54:07 crc kubenswrapper[4926]: I1122 10:54:07.702849 4926 scope.go:117] "RemoveContainer" containerID="74523a1e8c3a917685633fe60eea4a4c08741b68775123f7cce11bd8cc4d6eea" Nov 22 10:54:07 crc kubenswrapper[4926]: I1122 10:54:07.708607 4926 
kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/neutron-operator-controller-manager-669dc6ff5f-crkkv"] Nov 22 10:54:07 crc kubenswrapper[4926]: I1122 10:54:07.714112 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/manila-operator-controller-manager-58f887965d-r2ctj"] Nov 22 10:54:07 crc kubenswrapper[4926]: I1122 10:54:07.723816 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/keystone-operator-controller-manager-7454b96578-dvdzj"] Nov 22 10:54:07 crc kubenswrapper[4926]: I1122 10:54:07.727390 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/horizon-operator-controller-manager-598f69df5d-hls4w"] Nov 22 10:54:07 crc kubenswrapper[4926]: I1122 10:54:07.733234 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/infra-operator-controller-manager-7875d8bb94-pr7tn"] Nov 22 10:54:07 crc kubenswrapper[4926]: I1122 10:54:07.738277 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/mariadb-operator-controller-manager-5f449d8fbc-bfqxw"] Nov 22 10:54:07 crc kubenswrapper[4926]: I1122 10:54:07.743196 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/glance-operator-controller-manager-7969689c84-qshq6"] Nov 22 10:54:07 crc kubenswrapper[4926]: I1122 10:54:07.750105 4926 scope.go:117] "RemoveContainer" containerID="675ff17a73e3eb56f159869d655f1042acf8ef82a410907e6ae813c80c454843" Nov 22 10:54:07 crc kubenswrapper[4926]: E1122 10:54:07.750457 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"675ff17a73e3eb56f159869d655f1042acf8ef82a410907e6ae813c80c454843\": container with ID starting with 675ff17a73e3eb56f159869d655f1042acf8ef82a410907e6ae813c80c454843 not found: ID does not exist" containerID="675ff17a73e3eb56f159869d655f1042acf8ef82a410907e6ae813c80c454843" Nov 22 10:54:07 crc kubenswrapper[4926]: I1122 10:54:07.750592 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"675ff17a73e3eb56f159869d655f1042acf8ef82a410907e6ae813c80c454843"} err="failed to get container status \"675ff17a73e3eb56f159869d655f1042acf8ef82a410907e6ae813c80c454843\": rpc error: code = NotFound desc = could not find container \"675ff17a73e3eb56f159869d655f1042acf8ef82a410907e6ae813c80c454843\": container with ID starting with 675ff17a73e3eb56f159869d655f1042acf8ef82a410907e6ae813c80c454843 not found: ID does not exist" Nov 22 10:54:07 crc kubenswrapper[4926]: I1122 10:54:07.750686 4926 scope.go:117] "RemoveContainer" containerID="6b28b3335dbe8dcfe436cf102a88bdd887bc9ca114f850bee94aa1954b61fa7d" Nov 22 10:54:07 crc kubenswrapper[4926]: E1122 10:54:07.751074 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6b28b3335dbe8dcfe436cf102a88bdd887bc9ca114f850bee94aa1954b61fa7d\": container with ID starting with 6b28b3335dbe8dcfe436cf102a88bdd887bc9ca114f850bee94aa1954b61fa7d not found: ID does not exist" containerID="6b28b3335dbe8dcfe436cf102a88bdd887bc9ca114f850bee94aa1954b61fa7d" Nov 22 10:54:07 crc kubenswrapper[4926]: I1122 10:54:07.751094 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6b28b3335dbe8dcfe436cf102a88bdd887bc9ca114f850bee94aa1954b61fa7d"} err="failed to get container status \"6b28b3335dbe8dcfe436cf102a88bdd887bc9ca114f850bee94aa1954b61fa7d\": rpc error: code = 
NotFound desc = could not find container \"6b28b3335dbe8dcfe436cf102a88bdd887bc9ca114f850bee94aa1954b61fa7d\": container with ID starting with 6b28b3335dbe8dcfe436cf102a88bdd887bc9ca114f850bee94aa1954b61fa7d not found: ID does not exist" Nov 22 10:54:07 crc kubenswrapper[4926]: I1122 10:54:07.751112 4926 scope.go:117] "RemoveContainer" containerID="74523a1e8c3a917685633fe60eea4a4c08741b68775123f7cce11bd8cc4d6eea" Nov 22 10:54:07 crc kubenswrapper[4926]: E1122 10:54:07.751442 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"74523a1e8c3a917685633fe60eea4a4c08741b68775123f7cce11bd8cc4d6eea\": container with ID starting with 74523a1e8c3a917685633fe60eea4a4c08741b68775123f7cce11bd8cc4d6eea not found: ID does not exist" containerID="74523a1e8c3a917685633fe60eea4a4c08741b68775123f7cce11bd8cc4d6eea" Nov 22 10:54:07 crc kubenswrapper[4926]: I1122 10:54:07.751542 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"74523a1e8c3a917685633fe60eea4a4c08741b68775123f7cce11bd8cc4d6eea"} err="failed to get container status \"74523a1e8c3a917685633fe60eea4a4c08741b68775123f7cce11bd8cc4d6eea\": rpc error: code = NotFound desc = could not find container \"74523a1e8c3a917685633fe60eea4a4c08741b68775123f7cce11bd8cc4d6eea\": container with ID starting with 74523a1e8c3a917685633fe60eea4a4c08741b68775123f7cce11bd8cc4d6eea not found: ID does not exist" Nov 22 10:54:07 crc kubenswrapper[4926]: W1122 10:54:07.753821 4926 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod0996e99c_8565_426e_afa0_8a52ff2bee16.slice/crio-7a7be342341922c9eb113c1206ad22a77a52dde208d8897247c82e5ccf9ea12e WatchSource:0}: Error finding container 7a7be342341922c9eb113c1206ad22a77a52dde208d8897247c82e5ccf9ea12e: Status 404 returned error can't find the container with id 7a7be342341922c9eb113c1206ad22a77a52dde208d8897247c82e5ccf9ea12e Nov 22 10:54:07 crc kubenswrapper[4926]: W1122 10:54:07.756703 4926 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf3502c04_7310_4659_aa47_b91b71ff3b30.slice/crio-2b0f08e1c4b6728b2a15f48a5fe8239007752b61e5a5aee43f4194450b4b6ba8 WatchSource:0}: Error finding container 2b0f08e1c4b6728b2a15f48a5fe8239007752b61e5a5aee43f4194450b4b6ba8: Status 404 returned error can't find the container with id 2b0f08e1c4b6728b2a15f48a5fe8239007752b61e5a5aee43f4194450b4b6ba8 Nov 22 10:54:07 crc kubenswrapper[4926]: I1122 10:54:07.844518 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/4c6f9a58-d6f5-426f-bb8d-e019401a015a-cert\") pod \"openstack-baremetal-operator-controller-manager-7cfbb8b596qtsss\" (UID: \"4c6f9a58-d6f5-426f-bb8d-e019401a015a\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-7cfbb8b596qtsss" Nov 22 10:54:07 crc kubenswrapper[4926]: I1122 10:54:07.862606 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/4c6f9a58-d6f5-426f-bb8d-e019401a015a-cert\") pod \"openstack-baremetal-operator-controller-manager-7cfbb8b596qtsss\" (UID: \"4c6f9a58-d6f5-426f-bb8d-e019401a015a\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-7cfbb8b596qtsss" Nov 22 10:54:07 crc kubenswrapper[4926]: I1122 10:54:07.866350 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" 
pods=["openstack-operators/placement-operator-controller-manager-5b797b8dff-pgzg9"] Nov 22 10:54:07 crc kubenswrapper[4926]: W1122 10:54:07.874378 4926 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod355d4b1d_9137_4cf5_aac8_e373d1b7d696.slice/crio-8991c38c93d3d9770cb200addb14bb9316f87414d7bb92504d8735b04b25ddd8 WatchSource:0}: Error finding container 8991c38c93d3d9770cb200addb14bb9316f87414d7bb92504d8735b04b25ddd8: Status 404 returned error can't find the container with id 8991c38c93d3d9770cb200addb14bb9316f87414d7bb92504d8735b04b25ddd8 Nov 22 10:54:07 crc kubenswrapper[4926]: W1122 10:54:07.879460 4926 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod8b039ede_62fc_47ed_83ed_672e756887a1.slice/crio-066c3cf24ec184a6c8fc1ec779a9c5febe30aac276f6d5cafbf40dbd3824b4d3 WatchSource:0}: Error finding container 066c3cf24ec184a6c8fc1ec779a9c5febe30aac276f6d5cafbf40dbd3824b4d3: Status 404 returned error can't find the container with id 066c3cf24ec184a6c8fc1ec779a9c5febe30aac276f6d5cafbf40dbd3824b4d3 Nov 22 10:54:07 crc kubenswrapper[4926]: I1122 10:54:07.880778 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ovn-operator-controller-manager-587df66445-2hwd8"] Nov 22 10:54:07 crc kubenswrapper[4926]: E1122 10:54:07.882274 4926 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/nova-operator@sha256:c053e34316044f14929e16e4f0d97f9f1b24cb68b5e22b925ca74c66aaaed0a7,Command:[/manager],Args:[--health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080 --leader-elect],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-94w7d,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod nova-operator-controller-manager-cfbb9c588-zjkmb_openstack-operators(8b039ede-62fc-47ed-83ed-672e756887a1): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 22 10:54:07 crc kubenswrapper[4926]: W1122 10:54:07.887048 4926 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podec00fa84_7dd0_46d6_b9f2_4a7b687b347b.slice/crio-c43f3ecb0b0625cd62cb263b51e7c519212e9c0695abe49985db325477557306 WatchSource:0}: Error finding container c43f3ecb0b0625cd62cb263b51e7c519212e9c0695abe49985db325477557306: Status 404 returned error can't find the container with id c43f3ecb0b0625cd62cb263b51e7c519212e9c0695abe49985db325477557306 Nov 22 10:54:07 crc kubenswrapper[4926]: I1122 10:54:07.890563 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/nova-operator-controller-manager-cfbb9c588-zjkmb"] Nov 22 10:54:07 crc kubenswrapper[4926]: I1122 10:54:07.895942 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/octavia-operator-controller-manager-54cfbf4c7d-c5z8p"] Nov 22 10:54:07 crc kubenswrapper[4926]: E1122 10:54:07.896861 4926 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/octavia-operator@sha256:442c269d79163f8da75505019c02e9f0815837aaadcaddacb8e6c12df297ca13,Command:[/manager],Args:[--health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080 --leader-elect],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-9k2rf,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod octavia-operator-controller-manager-54cfbf4c7d-c5z8p_openstack-operators(71c1201e-62bb-4d32-945b-80cda1ff41ac): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 22 10:54:07 crc kubenswrapper[4926]: E1122 10:54:07.907748 4926 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/placement-operator@sha256:4094e7fc11a33e8e2b6768a053cafaf5b122446d23f9113d43d520cb64e9776c,Command:[/manager],Args:[--health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080 --leader-elect],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-gldn4,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod placement-operator-controller-manager-5b797b8dff-pgzg9_openstack-operators(ec00fa84-7dd0-46d6-b9f2-4a7b687b347b): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 22 10:54:08 crc kubenswrapper[4926]: E1122 10:54:08.052353 4926 
pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/octavia-operator-controller-manager-54cfbf4c7d-c5z8p" podUID="71c1201e-62bb-4d32-945b-80cda1ff41ac" Nov 22 10:54:08 crc kubenswrapper[4926]: E1122 10:54:08.074972 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/nova-operator-controller-manager-cfbb9c588-zjkmb" podUID="8b039ede-62fc-47ed-83ed-672e756887a1" Nov 22 10:54:08 crc kubenswrapper[4926]: E1122 10:54:08.118813 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/placement-operator-controller-manager-5b797b8dff-pgzg9" podUID="ec00fa84-7dd0-46d6-b9f2-4a7b687b347b" Nov 22 10:54:08 crc kubenswrapper[4926]: I1122 10:54:08.119967 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/test-operator-controller-manager-6f44bf845f-7vhg5"] Nov 22 10:54:08 crc kubenswrapper[4926]: I1122 10:54:08.120756 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-baremetal-operator-controller-manager-7cfbb8b596qtsss" Nov 22 10:54:08 crc kubenswrapper[4926]: I1122 10:54:08.125141 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/telemetry-operator-controller-manager-6d4bf84b58-95jv5"] Nov 22 10:54:08 crc kubenswrapper[4926]: I1122 10:54:08.128056 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/swift-operator-controller-manager-d656998f4-j7xg8"] Nov 22 10:54:08 crc kubenswrapper[4926]: I1122 10:54:08.142777 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-manager-64844fbb8-hngj4"] Nov 22 10:54:08 crc kubenswrapper[4926]: I1122 10:54:08.155567 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-manager-5f97d8c699-jdqsq"] Nov 22 10:54:08 crc kubenswrapper[4926]: W1122 10:54:08.158736 4926 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod6c866ac0_e106_4a90_a223_435b244634b5.slice/crio-f44605607f230f61cc69b3f58decb210e550b5ed938264dd114a5fac2ac5910f WatchSource:0}: Error finding container f44605607f230f61cc69b3f58decb210e550b5ed938264dd114a5fac2ac5910f: Status 404 returned error can't find the container with id f44605607f230f61cc69b3f58decb210e550b5ed938264dd114a5fac2ac5910f Nov 22 10:54:08 crc kubenswrapper[4926]: I1122 10:54:08.170388 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/watcher-operator-controller-manager-85494d54fc-czf4h"] Nov 22 10:54:08 crc kubenswrapper[4926]: E1122 10:54:08.194147 4926 kuberuntime_manager.go:1274] "Unhandled Error" err="container 
&Container{Name:operator,Image:quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:893e66303c1b0bc1d00a299a3f0380bad55c8dc813c8a1c6a4aab379f5aa12a2,Command:[/manager],Args:[],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:metrics,HostPort:0,ContainerPort:9782,Protocol:TCP,HostIP:,},},Env:[]EnvVar{EnvVar{Name:OPERATOR_NAMESPACE,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{200 -3} {} 200m DecimalSI},memory: {{524288000 0} {} 500Mi BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-hbbq5,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000660000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod rabbitmq-cluster-operator-manager-5f97d8c699-jdqsq_openstack-operators(3ab27f1b-e328-46d1-b9e5-b29e2caedef6): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 22 10:54:08 crc kubenswrapper[4926]: E1122 10:54:08.193857 4926 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/test-operator@sha256:210517b918e30df1c95fc7d961c8e57e9a9d1cc2b9fe7eb4dad2034dd53a90aa,Command:[/manager],Args:[--health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080 --leader-elect],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-tz5ds,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod test-operator-controller-manager-6f44bf845f-7vhg5_openstack-operators(dab1442d-6ad4-4d03-b520-a12d7a4d6c9d): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 22 10:54:08 crc kubenswrapper[4926]: E1122 10:54:08.195215 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"operator\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/rabbitmq-cluster-operator-manager-5f97d8c699-jdqsq" podUID="3ab27f1b-e328-46d1-b9e5-b29e2caedef6" Nov 22 10:54:08 crc kubenswrapper[4926]: E1122 10:54:08.238302 4926 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/watcher-operator@sha256:6bed55b172b9ee8ccc3952cbfc543d8bd44e2690f6db94348a754152fd78f4cf,Command:[/manager],Args:[--health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080 --leader-elect],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-qdpb2,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod watcher-operator-controller-manager-85494d54fc-czf4h_openstack-operators(3947549a-e067-4135-ba36-1e2663db15c0): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 22 10:54:08 crc kubenswrapper[4926]: E1122 10:54:08.438939 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/test-operator-controller-manager-6f44bf845f-7vhg5" podUID="dab1442d-6ad4-4d03-b520-a12d7a4d6c9d" Nov 22 10:54:08 crc kubenswrapper[4926]: E1122 10:54:08.514020 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/watcher-operator-controller-manager-85494d54fc-czf4h" podUID="3947549a-e067-4135-ba36-1e2663db15c0" Nov 22 10:54:08 crc kubenswrapper[4926]: I1122 10:54:08.608308 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6307ac6a-e49c-46f5-858a-b207f0b61910" path="/var/lib/kubelet/pods/6307ac6a-e49c-46f5-858a-b207f0b61910/volumes" Nov 22 10:54:08 crc kubenswrapper[4926]: I1122 10:54:08.646666 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-baremetal-operator-controller-manager-7cfbb8b596qtsss"] Nov 22 10:54:08 crc kubenswrapper[4926]: I1122 10:54:08.657769 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/nova-operator-controller-manager-cfbb9c588-zjkmb" event={"ID":"8b039ede-62fc-47ed-83ed-672e756887a1","Type":"ContainerStarted","Data":"80dbdde3c5f8a2051d753684b36a4f46e5996a576dc35b1fa61e0732a99b8ee0"} Nov 22 10:54:08 crc kubenswrapper[4926]: I1122 10:54:08.657817 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/nova-operator-controller-manager-cfbb9c588-zjkmb" event={"ID":"8b039ede-62fc-47ed-83ed-672e756887a1","Type":"ContainerStarted","Data":"066c3cf24ec184a6c8fc1ec779a9c5febe30aac276f6d5cafbf40dbd3824b4d3"} Nov 22 10:54:08 crc kubenswrapper[4926]: I1122 10:54:08.660970 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/octavia-operator-controller-manager-54cfbf4c7d-c5z8p" event={"ID":"71c1201e-62bb-4d32-945b-80cda1ff41ac","Type":"ContainerStarted","Data":"54edbdd5d81e3b790c8c3ee48b5c185387714363b6745034887c78726ad154ff"} Nov 22 10:54:08 crc kubenswrapper[4926]: I1122 10:54:08.661022 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/octavia-operator-controller-manager-54cfbf4c7d-c5z8p" event={"ID":"71c1201e-62bb-4d32-945b-80cda1ff41ac","Type":"ContainerStarted","Data":"cb36e05cf1cd364e42fd17d45acffe494f2d9fb0b0874c2c2757a8cde9b8ccea"} Nov 22 10:54:08 crc kubenswrapper[4926]: I1122 10:54:08.663368 
4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/glance-operator-controller-manager-7969689c84-qshq6" event={"ID":"3ebbbdf8-da82-4f02-a8f5-509de3b56721","Type":"ContainerStarted","Data":"02f6c3e00d8e67a729cd77bf05ad7892f04d4dfc898b79c88742381e2d5a2b26"} Nov 22 10:54:08 crc kubenswrapper[4926]: I1122 10:54:08.664184 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/neutron-operator-controller-manager-669dc6ff5f-crkkv" event={"ID":"0126a31b-68bb-46a7-8f3a-f34ad5d74e6d","Type":"ContainerStarted","Data":"554037a09e63b79d2029f6f0f1af561811e854b2dd67c95d6256d33e1c68915b"} Nov 22 10:54:08 crc kubenswrapper[4926]: E1122 10:54:08.671844 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/octavia-operator@sha256:442c269d79163f8da75505019c02e9f0815837aaadcaddacb8e6c12df297ca13\\\"\"" pod="openstack-operators/octavia-operator-controller-manager-54cfbf4c7d-c5z8p" podUID="71c1201e-62bb-4d32-945b-80cda1ff41ac" Nov 22 10:54:08 crc kubenswrapper[4926]: E1122 10:54:08.672181 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/nova-operator@sha256:c053e34316044f14929e16e4f0d97f9f1b24cb68b5e22b925ca74c66aaaed0a7\\\"\"" pod="openstack-operators/nova-operator-controller-manager-cfbb9c588-zjkmb" podUID="8b039ede-62fc-47ed-83ed-672e756887a1" Nov 22 10:54:08 crc kubenswrapper[4926]: I1122 10:54:08.680321 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/rabbitmq-cluster-operator-manager-5f97d8c699-jdqsq" event={"ID":"3ab27f1b-e328-46d1-b9e5-b29e2caedef6","Type":"ContainerStarted","Data":"9e8c690ba9824a75999d835ba3f298b5f4c8dfbae9423dafd58f82e03e525b63"} Nov 22 10:54:08 crc kubenswrapper[4926]: E1122 10:54:08.681644 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"operator\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:893e66303c1b0bc1d00a299a3f0380bad55c8dc813c8a1c6a4aab379f5aa12a2\\\"\"" pod="openstack-operators/rabbitmq-cluster-operator-manager-5f97d8c699-jdqsq" podUID="3ab27f1b-e328-46d1-b9e5-b29e2caedef6" Nov 22 10:54:08 crc kubenswrapper[4926]: I1122 10:54:08.682011 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/manila-operator-controller-manager-58f887965d-r2ctj" event={"ID":"0996e99c-8565-426e-afa0-8a52ff2bee16","Type":"ContainerStarted","Data":"7a7be342341922c9eb113c1206ad22a77a52dde208d8897247c82e5ccf9ea12e"} Nov 22 10:54:08 crc kubenswrapper[4926]: I1122 10:54:08.687209 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-controller-manager-7875d8bb94-pr7tn" event={"ID":"dc80ed79-7a34-4756-b5ed-0b3cda532910","Type":"ContainerStarted","Data":"2f9cb58c4b31d3b3013c6a6346ba3b7257ce5d698b14434c6d237ee1fabd24ec"} Nov 22 10:54:08 crc kubenswrapper[4926]: I1122 10:54:08.695056 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/telemetry-operator-controller-manager-6d4bf84b58-95jv5" event={"ID":"6c866ac0-e106-4a90-a223-435b244634b5","Type":"ContainerStarted","Data":"f44605607f230f61cc69b3f58decb210e550b5ed938264dd114a5fac2ac5910f"} Nov 22 10:54:08 crc kubenswrapper[4926]: I1122 10:54:08.732423 4926 kubelet.go:2453] "SyncLoop (PLEG): 
event for pod" pod="openstack-operators/ovn-operator-controller-manager-587df66445-2hwd8" event={"ID":"355d4b1d-9137-4cf5-aac8-e373d1b7d696","Type":"ContainerStarted","Data":"8991c38c93d3d9770cb200addb14bb9316f87414d7bb92504d8735b04b25ddd8"} Nov 22 10:54:08 crc kubenswrapper[4926]: I1122 10:54:08.739940 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/horizon-operator-controller-manager-598f69df5d-hls4w" event={"ID":"f3502c04-7310-4659-aa47-b91b71ff3b30","Type":"ContainerStarted","Data":"2b0f08e1c4b6728b2a15f48a5fe8239007752b61e5a5aee43f4194450b4b6ba8"} Nov 22 10:54:08 crc kubenswrapper[4926]: I1122 10:54:08.759490 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-controller-manager-7454b96578-dvdzj" event={"ID":"7e28261c-db91-4143-a418-1114acf60dc0","Type":"ContainerStarted","Data":"07f69dde3e3e56ba0c6074264f3de9bc9b4a389ffd8b888ca4d7f2649b96427e"} Nov 22 10:54:08 crc kubenswrapper[4926]: I1122 10:54:08.780405 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-85494d54fc-czf4h" event={"ID":"3947549a-e067-4135-ba36-1e2663db15c0","Type":"ContainerStarted","Data":"607d2acf34bf9943de39a70b2dd28ad34561cc49a04ad4ac70fb22b999d42919"} Nov 22 10:54:08 crc kubenswrapper[4926]: I1122 10:54:08.780467 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-85494d54fc-czf4h" event={"ID":"3947549a-e067-4135-ba36-1e2663db15c0","Type":"ContainerStarted","Data":"94f1a9255f5b13604b134ce48b3a2f036e76aa99a52bddb6d28463faa634432d"} Nov 22 10:54:08 crc kubenswrapper[4926]: E1122 10:54:08.795299 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/watcher-operator@sha256:6bed55b172b9ee8ccc3952cbfc543d8bd44e2690f6db94348a754152fd78f4cf\\\"\"" pod="openstack-operators/watcher-operator-controller-manager-85494d54fc-czf4h" podUID="3947549a-e067-4135-ba36-1e2663db15c0" Nov 22 10:54:08 crc kubenswrapper[4926]: I1122 10:54:08.811095 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/placement-operator-controller-manager-5b797b8dff-pgzg9" event={"ID":"ec00fa84-7dd0-46d6-b9f2-4a7b687b347b","Type":"ContainerStarted","Data":"a93cafb6e89d3f25efb6a6c0f4ba81b0cb7814ac8a6bcbbc0938de368670be34"} Nov 22 10:54:08 crc kubenswrapper[4926]: I1122 10:54:08.811148 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/placement-operator-controller-manager-5b797b8dff-pgzg9" event={"ID":"ec00fa84-7dd0-46d6-b9f2-4a7b687b347b","Type":"ContainerStarted","Data":"c43f3ecb0b0625cd62cb263b51e7c519212e9c0695abe49985db325477557306"} Nov 22 10:54:08 crc kubenswrapper[4926]: E1122 10:54:08.815376 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/placement-operator@sha256:4094e7fc11a33e8e2b6768a053cafaf5b122446d23f9113d43d520cb64e9776c\\\"\"" pod="openstack-operators/placement-operator-controller-manager-5b797b8dff-pgzg9" podUID="ec00fa84-7dd0-46d6-b9f2-4a7b687b347b" Nov 22 10:54:08 crc kubenswrapper[4926]: I1122 10:54:08.817053 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/test-operator-controller-manager-6f44bf845f-7vhg5" 
event={"ID":"dab1442d-6ad4-4d03-b520-a12d7a4d6c9d","Type":"ContainerStarted","Data":"db2b6a52886f3ad23a0b4105e3b5b73d9c6b5b4048820f3583c908ad9eb68454"} Nov 22 10:54:08 crc kubenswrapper[4926]: I1122 10:54:08.817104 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/test-operator-controller-manager-6f44bf845f-7vhg5" event={"ID":"dab1442d-6ad4-4d03-b520-a12d7a4d6c9d","Type":"ContainerStarted","Data":"4e94e1d090b8e87212914907c714267b720eabfb78b24992973736631c150b9a"} Nov 22 10:54:08 crc kubenswrapper[4926]: I1122 10:54:08.819703 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-manager-64844fbb8-hngj4" event={"ID":"d292d5fa-12ea-40d0-a6df-1f6e9f5c8059","Type":"ContainerStarted","Data":"ee6215dc06a1e946bd7b8b882d76ea2e34f5126a58485b061e83b913e05ccf94"} Nov 22 10:54:08 crc kubenswrapper[4926]: I1122 10:54:08.819736 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-manager-64844fbb8-hngj4" event={"ID":"d292d5fa-12ea-40d0-a6df-1f6e9f5c8059","Type":"ContainerStarted","Data":"99a5ac1263db49c1d119719a58c12eec0a8a69b3857c41afdb07a5e5d560b2cd"} Nov 22 10:54:08 crc kubenswrapper[4926]: I1122 10:54:08.820815 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-controller-manager-5f449d8fbc-bfqxw" event={"ID":"72b66cb9-cb2a-4977-a3f1-3fe22508641e","Type":"ContainerStarted","Data":"de58bfcccaf56586becd7783153fa16efb9d709b7fb6383aea6fa1493fd9d160"} Nov 22 10:54:08 crc kubenswrapper[4926]: I1122 10:54:08.821854 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/swift-operator-controller-manager-d656998f4-j7xg8" event={"ID":"e30ebbd3-daab-4ee4-acea-631c15b5045b","Type":"ContainerStarted","Data":"0cac6fe81cbd9a5c713bf7e109a0d6d16168c44b4955d3786657a9277ac8353e"} Nov 22 10:54:08 crc kubenswrapper[4926]: E1122 10:54:08.824077 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/test-operator@sha256:210517b918e30df1c95fc7d961c8e57e9a9d1cc2b9fe7eb4dad2034dd53a90aa\\\"\"" pod="openstack-operators/test-operator-controller-manager-6f44bf845f-7vhg5" podUID="dab1442d-6ad4-4d03-b520-a12d7a4d6c9d" Nov 22 10:54:09 crc kubenswrapper[4926]: I1122 10:54:09.840484 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-manager-64844fbb8-hngj4" event={"ID":"d292d5fa-12ea-40d0-a6df-1f6e9f5c8059","Type":"ContainerStarted","Data":"a02c7b1e17b6ee32b4306e4852f2ec23426c1d7c891b5178fb3aa48ef61575d6"} Nov 22 10:54:09 crc kubenswrapper[4926]: I1122 10:54:09.840993 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-controller-manager-64844fbb8-hngj4" Nov 22 10:54:09 crc kubenswrapper[4926]: I1122 10:54:09.842647 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-baremetal-operator-controller-manager-7cfbb8b596qtsss" event={"ID":"4c6f9a58-d6f5-426f-bb8d-e019401a015a","Type":"ContainerStarted","Data":"f80022a9314f47c848367d46fcd5b7bdf34eb72d7dcf60f9fd31fd4b0051e9df"} Nov 22 10:54:09 crc kubenswrapper[4926]: E1122 10:54:09.846406 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image 
\\\"quay.io/openstack-k8s-operators/test-operator@sha256:210517b918e30df1c95fc7d961c8e57e9a9d1cc2b9fe7eb4dad2034dd53a90aa\\\"\"" pod="openstack-operators/test-operator-controller-manager-6f44bf845f-7vhg5" podUID="dab1442d-6ad4-4d03-b520-a12d7a4d6c9d" Nov 22 10:54:09 crc kubenswrapper[4926]: E1122 10:54:09.846450 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/placement-operator@sha256:4094e7fc11a33e8e2b6768a053cafaf5b122446d23f9113d43d520cb64e9776c\\\"\"" pod="openstack-operators/placement-operator-controller-manager-5b797b8dff-pgzg9" podUID="ec00fa84-7dd0-46d6-b9f2-4a7b687b347b" Nov 22 10:54:09 crc kubenswrapper[4926]: E1122 10:54:09.846518 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/nova-operator@sha256:c053e34316044f14929e16e4f0d97f9f1b24cb68b5e22b925ca74c66aaaed0a7\\\"\"" pod="openstack-operators/nova-operator-controller-manager-cfbb9c588-zjkmb" podUID="8b039ede-62fc-47ed-83ed-672e756887a1" Nov 22 10:54:09 crc kubenswrapper[4926]: E1122 10:54:09.846556 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/watcher-operator@sha256:6bed55b172b9ee8ccc3952cbfc543d8bd44e2690f6db94348a754152fd78f4cf\\\"\"" pod="openstack-operators/watcher-operator-controller-manager-85494d54fc-czf4h" podUID="3947549a-e067-4135-ba36-1e2663db15c0" Nov 22 10:54:09 crc kubenswrapper[4926]: E1122 10:54:09.846589 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"operator\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:893e66303c1b0bc1d00a299a3f0380bad55c8dc813c8a1c6a4aab379f5aa12a2\\\"\"" pod="openstack-operators/rabbitmq-cluster-operator-manager-5f97d8c699-jdqsq" podUID="3ab27f1b-e328-46d1-b9e5-b29e2caedef6" Nov 22 10:54:09 crc kubenswrapper[4926]: E1122 10:54:09.846621 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/octavia-operator@sha256:442c269d79163f8da75505019c02e9f0815837aaadcaddacb8e6c12df297ca13\\\"\"" pod="openstack-operators/octavia-operator-controller-manager-54cfbf4c7d-c5z8p" podUID="71c1201e-62bb-4d32-945b-80cda1ff41ac" Nov 22 10:54:09 crc kubenswrapper[4926]: I1122 10:54:09.862723 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-controller-manager-64844fbb8-hngj4" podStartSLOduration=3.862706356 podStartE2EDuration="3.862706356s" podCreationTimestamp="2025-11-22 10:54:06 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 10:54:09.861562683 +0000 UTC m=+870.163167970" watchObservedRunningTime="2025-11-22 10:54:09.862706356 +0000 UTC m=+870.164311643" Nov 22 10:54:17 crc kubenswrapper[4926]: I1122 10:54:17.132582 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-operator-controller-manager-64844fbb8-hngj4" Nov 22 10:54:19 crc kubenswrapper[4926]: E1122 10:54:19.574402 4926 log.go:32] "PullImage from image service 
failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/manila-operator@sha256:b749a5dd8bc718875c3f5e81b38d54d003be77ab92de4a3e9f9595566496a58a" Nov 22 10:54:19 crc kubenswrapper[4926]: E1122 10:54:19.574974 4926 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/manila-operator@sha256:b749a5dd8bc718875c3f5e81b38d54d003be77ab92de4a3e9f9595566496a58a,Command:[/manager],Args:[--health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080 --leader-elect],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-4gqs8,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod manila-operator-controller-manager-58f887965d-r2ctj_openstack-operators(0996e99c-8565-426e-afa0-8a52ff2bee16): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 22 10:54:20 crc kubenswrapper[4926]: E1122 10:54:20.312676 4926 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/glance-operator@sha256:d38faa9070da05487afdaa9e261ad39274c2ed862daf42efa460a040431f1991" Nov 22 10:54:20 crc kubenswrapper[4926]: E1122 10:54:20.313470 4926 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/glance-operator@sha256:d38faa9070da05487afdaa9e261ad39274c2ed862daf42efa460a040431f1991,Command:[/manager],Args:[--health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080 
--leader-elect],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-g4jr6,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod glance-operator-controller-manager-7969689c84-qshq6_openstack-operators(3ebbbdf8-da82-4f02-a8f5-509de3b56721): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 22 10:54:21 crc kubenswrapper[4926]: E1122 10:54:21.235363 4926 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/swift-operator@sha256:c0b5f124a37c1538042c0e63f0978429572e2a851d7f3a6eb80de09b86d755a0" Nov 22 10:54:21 crc kubenswrapper[4926]: E1122 10:54:21.235584 4926 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/swift-operator@sha256:c0b5f124a37c1538042c0e63f0978429572e2a851d7f3a6eb80de09b86d755a0,Command:[/manager],Args:[--health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080 --leader-elect],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} 
BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-jfblp,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000660000,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod swift-operator-controller-manager-d656998f4-j7xg8_openstack-operators(e30ebbd3-daab-4ee4-acea-631c15b5045b): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 22 10:54:21 crc kubenswrapper[4926]: E1122 10:54:21.645655 4926 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/ovn-operator@sha256:8ee0c0ec6eb84890fc3f6b1ee392fd34adf1720ad94e70e8969b213cce6045f0" Nov 22 10:54:21 crc kubenswrapper[4926]: E1122 10:54:21.645814 4926 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/ovn-operator@sha256:8ee0c0ec6eb84890fc3f6b1ee392fd34adf1720ad94e70e8969b213cce6045f0,Command:[/manager],Args:[--health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080 --leader-elect],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-rmvx7,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ovn-operator-controller-manager-587df66445-2hwd8_openstack-operators(355d4b1d-9137-4cf5-aac8-e373d1b7d696): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 22 10:54:21 crc kubenswrapper[4926]: I1122 10:54:21.731351 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-t96vk"] Nov 22 10:54:21 crc kubenswrapper[4926]: E1122 10:54:21.732187 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6307ac6a-e49c-46f5-858a-b207f0b61910" containerName="extract-utilities" Nov 22 10:54:21 crc kubenswrapper[4926]: I1122 10:54:21.732204 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="6307ac6a-e49c-46f5-858a-b207f0b61910" containerName="extract-utilities" Nov 22 10:54:21 crc kubenswrapper[4926]: E1122 10:54:21.732219 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6307ac6a-e49c-46f5-858a-b207f0b61910" containerName="extract-content" Nov 22 10:54:21 crc kubenswrapper[4926]: I1122 10:54:21.732225 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="6307ac6a-e49c-46f5-858a-b207f0b61910" containerName="extract-content" Nov 22 10:54:21 crc kubenswrapper[4926]: E1122 10:54:21.732256 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6307ac6a-e49c-46f5-858a-b207f0b61910" containerName="registry-server" Nov 22 10:54:21 crc kubenswrapper[4926]: I1122 10:54:21.732263 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="6307ac6a-e49c-46f5-858a-b207f0b61910" containerName="registry-server" Nov 22 10:54:21 crc kubenswrapper[4926]: I1122 10:54:21.732448 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="6307ac6a-e49c-46f5-858a-b207f0b61910" containerName="registry-server" Nov 22 10:54:21 crc kubenswrapper[4926]: I1122 10:54:21.734111 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-t96vk" Nov 22 10:54:21 crc kubenswrapper[4926]: I1122 10:54:21.739478 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-t96vk"] Nov 22 10:54:21 crc kubenswrapper[4926]: I1122 10:54:21.858791 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7hnb5\" (UniqueName: \"kubernetes.io/projected/1062e5f6-8adb-46c7-bf38-872992e70102-kube-api-access-7hnb5\") pod \"certified-operators-t96vk\" (UID: \"1062e5f6-8adb-46c7-bf38-872992e70102\") " pod="openshift-marketplace/certified-operators-t96vk" Nov 22 10:54:21 crc kubenswrapper[4926]: I1122 10:54:21.858850 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1062e5f6-8adb-46c7-bf38-872992e70102-utilities\") pod \"certified-operators-t96vk\" (UID: \"1062e5f6-8adb-46c7-bf38-872992e70102\") " pod="openshift-marketplace/certified-operators-t96vk" Nov 22 10:54:21 crc kubenswrapper[4926]: I1122 10:54:21.858934 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1062e5f6-8adb-46c7-bf38-872992e70102-catalog-content\") pod \"certified-operators-t96vk\" (UID: \"1062e5f6-8adb-46c7-bf38-872992e70102\") " pod="openshift-marketplace/certified-operators-t96vk" Nov 22 10:54:21 crc kubenswrapper[4926]: I1122 10:54:21.960120 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1062e5f6-8adb-46c7-bf38-872992e70102-utilities\") pod \"certified-operators-t96vk\" (UID: \"1062e5f6-8adb-46c7-bf38-872992e70102\") " pod="openshift-marketplace/certified-operators-t96vk" Nov 22 10:54:21 crc kubenswrapper[4926]: I1122 10:54:21.960682 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1062e5f6-8adb-46c7-bf38-872992e70102-utilities\") pod \"certified-operators-t96vk\" (UID: \"1062e5f6-8adb-46c7-bf38-872992e70102\") " pod="openshift-marketplace/certified-operators-t96vk" Nov 22 10:54:21 crc kubenswrapper[4926]: I1122 10:54:21.960832 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1062e5f6-8adb-46c7-bf38-872992e70102-catalog-content\") pod \"certified-operators-t96vk\" (UID: \"1062e5f6-8adb-46c7-bf38-872992e70102\") " pod="openshift-marketplace/certified-operators-t96vk" Nov 22 10:54:21 crc kubenswrapper[4926]: I1122 10:54:21.961014 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7hnb5\" (UniqueName: \"kubernetes.io/projected/1062e5f6-8adb-46c7-bf38-872992e70102-kube-api-access-7hnb5\") pod \"certified-operators-t96vk\" (UID: \"1062e5f6-8adb-46c7-bf38-872992e70102\") " pod="openshift-marketplace/certified-operators-t96vk" Nov 22 10:54:21 crc kubenswrapper[4926]: I1122 10:54:21.961110 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1062e5f6-8adb-46c7-bf38-872992e70102-catalog-content\") pod \"certified-operators-t96vk\" (UID: \"1062e5f6-8adb-46c7-bf38-872992e70102\") " pod="openshift-marketplace/certified-operators-t96vk" Nov 22 10:54:21 crc kubenswrapper[4926]: I1122 10:54:21.992873 4926 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-7hnb5\" (UniqueName: \"kubernetes.io/projected/1062e5f6-8adb-46c7-bf38-872992e70102-kube-api-access-7hnb5\") pod \"certified-operators-t96vk\" (UID: \"1062e5f6-8adb-46c7-bf38-872992e70102\") " pod="openshift-marketplace/certified-operators-t96vk" Nov 22 10:54:22 crc kubenswrapper[4926]: I1122 10:54:22.059285 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-t96vk" Nov 22 10:54:22 crc kubenswrapper[4926]: E1122 10:54:22.432599 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/swift-operator-controller-manager-d656998f4-j7xg8" podUID="e30ebbd3-daab-4ee4-acea-631c15b5045b" Nov 22 10:54:22 crc kubenswrapper[4926]: E1122 10:54:22.623788 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/manila-operator-controller-manager-58f887965d-r2ctj" podUID="0996e99c-8565-426e-afa0-8a52ff2bee16" Nov 22 10:54:22 crc kubenswrapper[4926]: E1122 10:54:22.644216 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/ovn-operator-controller-manager-587df66445-2hwd8" podUID="355d4b1d-9137-4cf5-aac8-e373d1b7d696" Nov 22 10:54:22 crc kubenswrapper[4926]: I1122 10:54:22.737208 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-t96vk"] Nov 22 10:54:22 crc kubenswrapper[4926]: W1122 10:54:22.763330 4926 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod1062e5f6_8adb_46c7_bf38_872992e70102.slice/crio-053e0f9409bcf302c9e627dbfb3afbcc1d59ca34821af02b5b22d1d16abb8092 WatchSource:0}: Error finding container 053e0f9409bcf302c9e627dbfb3afbcc1d59ca34821af02b5b22d1d16abb8092: Status 404 returned error can't find the container with id 053e0f9409bcf302c9e627dbfb3afbcc1d59ca34821af02b5b22d1d16abb8092 Nov 22 10:54:22 crc kubenswrapper[4926]: E1122 10:54:22.794198 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/glance-operator-controller-manager-7969689c84-qshq6" podUID="3ebbbdf8-da82-4f02-a8f5-509de3b56721" Nov 22 10:54:22 crc kubenswrapper[4926]: I1122 10:54:22.959940 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/neutron-operator-controller-manager-669dc6ff5f-crkkv" event={"ID":"0126a31b-68bb-46a7-8f3a-f34ad5d74e6d","Type":"ContainerStarted","Data":"a3375948a740a177527e23b05680f6fe3b76bf7dbaae7539027a44e9014d26cc"} Nov 22 10:54:22 crc kubenswrapper[4926]: I1122 10:54:22.963032 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-controller-manager-5f449d8fbc-bfqxw" event={"ID":"72b66cb9-cb2a-4977-a3f1-3fe22508641e","Type":"ContainerStarted","Data":"18047c2ab2f959929a4da3ddb8d6b9b1e9a7abe8d32557dd1aa0ce4f7fc6e086"} Nov 22 10:54:22 crc kubenswrapper[4926]: I1122 10:54:22.964863 4926 kubelet.go:2453] "SyncLoop (PLEG): event for 
pod" pod="openstack-operators/swift-operator-controller-manager-d656998f4-j7xg8" event={"ID":"e30ebbd3-daab-4ee4-acea-631c15b5045b","Type":"ContainerStarted","Data":"10cd4fd7056864b760a732dd25f9599d1b93d6f6dc5647f8e8308009310427f1"} Nov 22 10:54:22 crc kubenswrapper[4926]: I1122 10:54:22.968919 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ironic-operator-controller-manager-99b499f4-7j69z" event={"ID":"46528db3-6717-4abb-a779-33290ae0c986","Type":"ContainerStarted","Data":"3a5dd9f972d81b8f5a91e86acd7d34de309829e2eecd92412f137c8c7f3e1855"} Nov 22 10:54:22 crc kubenswrapper[4926]: I1122 10:54:22.981450 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/manila-operator-controller-manager-58f887965d-r2ctj" event={"ID":"0996e99c-8565-426e-afa0-8a52ff2bee16","Type":"ContainerStarted","Data":"39ec08d35ef8f3d674a203aae3c9c80430d1898a83fef9b78d3879717a3afb7c"} Nov 22 10:54:22 crc kubenswrapper[4926]: I1122 10:54:22.988140 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/cinder-operator-controller-manager-6498cbf48f-cttxc" event={"ID":"644aaf3f-48c2-4789-9775-18ed3ae24fd7","Type":"ContainerStarted","Data":"a9b673db7ca36856ad1af7a15fe62d25cdfc6237495e353a2ef90c6a17dc1016"} Nov 22 10:54:22 crc kubenswrapper[4926]: I1122 10:54:22.989797 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/glance-operator-controller-manager-7969689c84-qshq6" event={"ID":"3ebbbdf8-da82-4f02-a8f5-509de3b56721","Type":"ContainerStarted","Data":"c335781cb84f004e4597687988b5fd5cb956ae42ae2c395b932629612eb95531"} Nov 22 10:54:22 crc kubenswrapper[4926]: I1122 10:54:22.996467 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/designate-operator-controller-manager-767ccfd65f-8p4kn" event={"ID":"02d4d3c4-4951-4f41-8605-239ac95dae92","Type":"ContainerStarted","Data":"73e592eb9137907eb51b3cc30c962e984e4ea55d2a88f1b7d68a721114a8c9e2"} Nov 22 10:54:23 crc kubenswrapper[4926]: E1122 10:54:23.004739 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/manila-operator@sha256:b749a5dd8bc718875c3f5e81b38d54d003be77ab92de4a3e9f9595566496a58a\\\"\"" pod="openstack-operators/manila-operator-controller-manager-58f887965d-r2ctj" podUID="0996e99c-8565-426e-afa0-8a52ff2bee16" Nov 22 10:54:23 crc kubenswrapper[4926]: I1122 10:54:23.004873 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-t96vk" event={"ID":"1062e5f6-8adb-46c7-bf38-872992e70102","Type":"ContainerStarted","Data":"053e0f9409bcf302c9e627dbfb3afbcc1d59ca34821af02b5b22d1d16abb8092"} Nov 22 10:54:23 crc kubenswrapper[4926]: E1122 10:54:23.005000 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/swift-operator@sha256:c0b5f124a37c1538042c0e63f0978429572e2a851d7f3a6eb80de09b86d755a0\\\"\"" pod="openstack-operators/swift-operator-controller-manager-d656998f4-j7xg8" podUID="e30ebbd3-daab-4ee4-acea-631c15b5045b" Nov 22 10:54:23 crc kubenswrapper[4926]: E1122 10:54:23.005095 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image 
\\\"quay.io/openstack-k8s-operators/glance-operator@sha256:d38faa9070da05487afdaa9e261ad39274c2ed862daf42efa460a040431f1991\\\"\"" pod="openstack-operators/glance-operator-controller-manager-7969689c84-qshq6" podUID="3ebbbdf8-da82-4f02-a8f5-509de3b56721" Nov 22 10:54:23 crc kubenswrapper[4926]: I1122 10:54:23.021505 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ovn-operator-controller-manager-587df66445-2hwd8" event={"ID":"355d4b1d-9137-4cf5-aac8-e373d1b7d696","Type":"ContainerStarted","Data":"7223cfc680b08c93dc9b8a90a7ae6633937be08378e4ed23415465697530b995"} Nov 22 10:54:23 crc kubenswrapper[4926]: E1122 10:54:23.027505 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/ovn-operator@sha256:8ee0c0ec6eb84890fc3f6b1ee392fd34adf1720ad94e70e8969b213cce6045f0\\\"\"" pod="openstack-operators/ovn-operator-controller-manager-587df66445-2hwd8" podUID="355d4b1d-9137-4cf5-aac8-e373d1b7d696" Nov 22 10:54:24 crc kubenswrapper[4926]: I1122 10:54:24.035193 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-controller-manager-5f449d8fbc-bfqxw" event={"ID":"72b66cb9-cb2a-4977-a3f1-3fe22508641e","Type":"ContainerStarted","Data":"e4830af3143e7a49d4f17c5a810ac3064261a40bf29d264981b25ea5a1aac27c"} Nov 22 10:54:24 crc kubenswrapper[4926]: I1122 10:54:24.035561 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/mariadb-operator-controller-manager-5f449d8fbc-bfqxw" Nov 22 10:54:24 crc kubenswrapper[4926]: I1122 10:54:24.047720 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/telemetry-operator-controller-manager-6d4bf84b58-95jv5" event={"ID":"6c866ac0-e106-4a90-a223-435b244634b5","Type":"ContainerStarted","Data":"592c3fb2456e903b0b069e8eff1d08b940878207e3d24e8f5623cec20b430bbb"} Nov 22 10:54:24 crc kubenswrapper[4926]: I1122 10:54:24.047788 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/telemetry-operator-controller-manager-6d4bf84b58-95jv5" event={"ID":"6c866ac0-e106-4a90-a223-435b244634b5","Type":"ContainerStarted","Data":"c81a8d97ecd00e5bcecb945377710035782811b788ceae52ca7460f86735fa83"} Nov 22 10:54:24 crc kubenswrapper[4926]: I1122 10:54:24.048195 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/telemetry-operator-controller-manager-6d4bf84b58-95jv5" Nov 22 10:54:24 crc kubenswrapper[4926]: I1122 10:54:24.050725 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/neutron-operator-controller-manager-669dc6ff5f-crkkv" event={"ID":"0126a31b-68bb-46a7-8f3a-f34ad5d74e6d","Type":"ContainerStarted","Data":"02815ce65595704efefeff81b1aada23ffb9b33ab939a2b80852f21f9e83eb7a"} Nov 22 10:54:24 crc kubenswrapper[4926]: I1122 10:54:24.051524 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/neutron-operator-controller-manager-669dc6ff5f-crkkv" Nov 22 10:54:24 crc kubenswrapper[4926]: I1122 10:54:24.054867 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/mariadb-operator-controller-manager-5f449d8fbc-bfqxw" podStartSLOduration=4.975426882 podStartE2EDuration="19.054843716s" podCreationTimestamp="2025-11-22 10:54:05 +0000 UTC" firstStartedPulling="2025-11-22 10:54:07.775382323 +0000 UTC m=+868.076987610" lastFinishedPulling="2025-11-22 
10:54:21.854799157 +0000 UTC m=+882.156404444" observedRunningTime="2025-11-22 10:54:24.051189942 +0000 UTC m=+884.352795229" watchObservedRunningTime="2025-11-22 10:54:24.054843716 +0000 UTC m=+884.356449003" Nov 22 10:54:24 crc kubenswrapper[4926]: I1122 10:54:24.057381 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-controller-manager-7454b96578-dvdzj" event={"ID":"7e28261c-db91-4143-a418-1114acf60dc0","Type":"ContainerStarted","Data":"0f2616907eeb63f9e6a51550396c904821f4336c5d8c2e222070e8b4fdf4a69c"} Nov 22 10:54:24 crc kubenswrapper[4926]: I1122 10:54:24.065222 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-controller-manager-7875d8bb94-pr7tn" event={"ID":"dc80ed79-7a34-4756-b5ed-0b3cda532910","Type":"ContainerStarted","Data":"8b3b851667de4a16789ff96e2ce8f3857753128daa031806cb5a991617829cfd"} Nov 22 10:54:24 crc kubenswrapper[4926]: I1122 10:54:24.065265 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-controller-manager-7875d8bb94-pr7tn" event={"ID":"dc80ed79-7a34-4756-b5ed-0b3cda532910","Type":"ContainerStarted","Data":"6006467e9f4b64b561ad719bf4c8678bc703222ee5317778adb0266995bb09c3"} Nov 22 10:54:24 crc kubenswrapper[4926]: I1122 10:54:24.065947 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/infra-operator-controller-manager-7875d8bb94-pr7tn" Nov 22 10:54:24 crc kubenswrapper[4926]: I1122 10:54:24.068799 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/barbican-operator-controller-manager-697c78f669-dfq9w" event={"ID":"f8406cda-67f4-425a-83f1-ab90cf4ebf0c","Type":"ContainerStarted","Data":"3fb1d8503d63e82e411514ab8f6f6873645aa06266f1cb701fd2d5d9726e0b61"} Nov 22 10:54:24 crc kubenswrapper[4926]: I1122 10:54:24.068821 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/barbican-operator-controller-manager-697c78f669-dfq9w" event={"ID":"f8406cda-67f4-425a-83f1-ab90cf4ebf0c","Type":"ContainerStarted","Data":"5dd2b6e9c792ff06bb66f3b68c5d9baa77f73f4d5bad3c3cb7ec2e23bb47e517"} Nov 22 10:54:24 crc kubenswrapper[4926]: I1122 10:54:24.069269 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/barbican-operator-controller-manager-697c78f669-dfq9w" Nov 22 10:54:24 crc kubenswrapper[4926]: I1122 10:54:24.071112 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/heat-operator-controller-manager-7869d7c46b-np8cn" event={"ID":"bf6721b8-a1f6-4d27-ad5a-c090e2dc8806","Type":"ContainerStarted","Data":"48b346de29df3cd0a91e9b20460acd576db660eb91c9d409e4dbb137bee1ba0f"} Nov 22 10:54:24 crc kubenswrapper[4926]: I1122 10:54:24.071144 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/heat-operator-controller-manager-7869d7c46b-np8cn" event={"ID":"bf6721b8-a1f6-4d27-ad5a-c090e2dc8806","Type":"ContainerStarted","Data":"509fe4b761f6d6b6876742080aecaed0b6e15665d1ad3fbc008580198bdf3020"} Nov 22 10:54:24 crc kubenswrapper[4926]: I1122 10:54:24.071434 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/heat-operator-controller-manager-7869d7c46b-np8cn" Nov 22 10:54:24 crc kubenswrapper[4926]: I1122 10:54:24.084224 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/neutron-operator-controller-manager-669dc6ff5f-crkkv" podStartSLOduration=5.01496365 
podStartE2EDuration="19.084209234s" podCreationTimestamp="2025-11-22 10:54:05 +0000 UTC" firstStartedPulling="2025-11-22 10:54:07.765391908 +0000 UTC m=+868.066997195" lastFinishedPulling="2025-11-22 10:54:21.834637502 +0000 UTC m=+882.136242779" observedRunningTime="2025-11-22 10:54:24.083447982 +0000 UTC m=+884.385053279" watchObservedRunningTime="2025-11-22 10:54:24.084209234 +0000 UTC m=+884.385814521" Nov 22 10:54:24 crc kubenswrapper[4926]: I1122 10:54:24.100389 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/designate-operator-controller-manager-767ccfd65f-8p4kn" event={"ID":"02d4d3c4-4951-4f41-8605-239ac95dae92","Type":"ContainerStarted","Data":"e5780c516e6e1f0f78a1c1e72e16fbaa6d0a7581d93ccc8064c1aedbba76c019"} Nov 22 10:54:24 crc kubenswrapper[4926]: I1122 10:54:24.101221 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/designate-operator-controller-manager-767ccfd65f-8p4kn" Nov 22 10:54:24 crc kubenswrapper[4926]: I1122 10:54:24.112049 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/telemetry-operator-controller-manager-6d4bf84b58-95jv5" podStartSLOduration=4.470998773 podStartE2EDuration="18.112027568s" podCreationTimestamp="2025-11-22 10:54:06 +0000 UTC" firstStartedPulling="2025-11-22 10:54:08.186591706 +0000 UTC m=+868.488196993" lastFinishedPulling="2025-11-22 10:54:21.827620501 +0000 UTC m=+882.129225788" observedRunningTime="2025-11-22 10:54:24.107214951 +0000 UTC m=+884.408820248" watchObservedRunningTime="2025-11-22 10:54:24.112027568 +0000 UTC m=+884.413632855" Nov 22 10:54:24 crc kubenswrapper[4926]: I1122 10:54:24.112178 4926 generic.go:334] "Generic (PLEG): container finished" podID="1062e5f6-8adb-46c7-bf38-872992e70102" containerID="19651149505e537358620d38f7df48b729baea6be93550859dc8c5287f81e67e" exitCode=0 Nov 22 10:54:24 crc kubenswrapper[4926]: I1122 10:54:24.112239 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-t96vk" event={"ID":"1062e5f6-8adb-46c7-bf38-872992e70102","Type":"ContainerDied","Data":"19651149505e537358620d38f7df48b729baea6be93550859dc8c5287f81e67e"} Nov 22 10:54:24 crc kubenswrapper[4926]: I1122 10:54:24.133990 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/horizon-operator-controller-manager-598f69df5d-hls4w" event={"ID":"f3502c04-7310-4659-aa47-b91b71ff3b30","Type":"ContainerStarted","Data":"2995baa75885155343a974f3152e9e800f2330b11ba7c614ba7fd7cf406389dd"} Nov 22 10:54:24 crc kubenswrapper[4926]: I1122 10:54:24.134037 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/horizon-operator-controller-manager-598f69df5d-hls4w" event={"ID":"f3502c04-7310-4659-aa47-b91b71ff3b30","Type":"ContainerStarted","Data":"1e027b3d17561dbc09e7011b020b649a1df09c144a797a0773f8a9940a7971fe"} Nov 22 10:54:24 crc kubenswrapper[4926]: I1122 10:54:24.134633 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/horizon-operator-controller-manager-598f69df5d-hls4w" Nov 22 10:54:24 crc kubenswrapper[4926]: I1122 10:54:24.149871 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/heat-operator-controller-manager-7869d7c46b-np8cn" podStartSLOduration=4.676509252 podStartE2EDuration="19.149851957s" podCreationTimestamp="2025-11-22 10:54:05 +0000 UTC" firstStartedPulling="2025-11-22 10:54:07.385131757 +0000 UTC m=+867.686737044" 
lastFinishedPulling="2025-11-22 10:54:21.858474462 +0000 UTC m=+882.160079749" observedRunningTime="2025-11-22 10:54:24.133598954 +0000 UTC m=+884.435204241" watchObservedRunningTime="2025-11-22 10:54:24.149851957 +0000 UTC m=+884.451457244" Nov 22 10:54:24 crc kubenswrapper[4926]: I1122 10:54:24.152105 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-baremetal-operator-controller-manager-7cfbb8b596qtsss" event={"ID":"4c6f9a58-d6f5-426f-bb8d-e019401a015a","Type":"ContainerStarted","Data":"fd21874d77729b8ab02a10b1666a70ab4262f9f427e428f324390240facc7553"} Nov 22 10:54:24 crc kubenswrapper[4926]: I1122 10:54:24.176217 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ironic-operator-controller-manager-99b499f4-7j69z" event={"ID":"46528db3-6717-4abb-a779-33290ae0c986","Type":"ContainerStarted","Data":"0f98910ac46edf192b3f31dbf62a84148214f729deefee91923c8b6d7a85bc87"} Nov 22 10:54:24 crc kubenswrapper[4926]: I1122 10:54:24.177623 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/ironic-operator-controller-manager-99b499f4-7j69z" Nov 22 10:54:24 crc kubenswrapper[4926]: I1122 10:54:24.189301 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/designate-operator-controller-manager-767ccfd65f-8p4kn" podStartSLOduration=4.682581726 podStartE2EDuration="19.189275812s" podCreationTimestamp="2025-11-22 10:54:05 +0000 UTC" firstStartedPulling="2025-11-22 10:54:07.337185399 +0000 UTC m=+867.638790686" lastFinishedPulling="2025-11-22 10:54:21.843879485 +0000 UTC m=+882.145484772" observedRunningTime="2025-11-22 10:54:24.169505948 +0000 UTC m=+884.471111235" watchObservedRunningTime="2025-11-22 10:54:24.189275812 +0000 UTC m=+884.490881109" Nov 22 10:54:24 crc kubenswrapper[4926]: I1122 10:54:24.221638 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/infra-operator-controller-manager-7875d8bb94-pr7tn" podStartSLOduration=5.143179929 podStartE2EDuration="19.221617585s" podCreationTimestamp="2025-11-22 10:54:05 +0000 UTC" firstStartedPulling="2025-11-22 10:54:07.776524726 +0000 UTC m=+868.078130013" lastFinishedPulling="2025-11-22 10:54:21.854962382 +0000 UTC m=+882.156567669" observedRunningTime="2025-11-22 10:54:24.206492594 +0000 UTC m=+884.508097901" watchObservedRunningTime="2025-11-22 10:54:24.221617585 +0000 UTC m=+884.523222872" Nov 22 10:54:24 crc kubenswrapper[4926]: I1122 10:54:24.243944 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/cinder-operator-controller-manager-6498cbf48f-cttxc" event={"ID":"644aaf3f-48c2-4789-9775-18ed3ae24fd7","Type":"ContainerStarted","Data":"f97fec09388ca5c888edf2be05eed96d54c6f55d9e6c89864336e887bc0191db"} Nov 22 10:54:24 crc kubenswrapper[4926]: I1122 10:54:24.243991 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/cinder-operator-controller-manager-6498cbf48f-cttxc" Nov 22 10:54:24 crc kubenswrapper[4926]: I1122 10:54:24.247704 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/barbican-operator-controller-manager-697c78f669-dfq9w" podStartSLOduration=4.211612196 podStartE2EDuration="19.247685649s" podCreationTimestamp="2025-11-22 10:54:05 +0000 UTC" firstStartedPulling="2025-11-22 10:54:06.804191919 +0000 UTC m=+867.105797206" lastFinishedPulling="2025-11-22 10:54:21.840265362 +0000 UTC m=+882.141870659" observedRunningTime="2025-11-22 
10:54:24.245443765 +0000 UTC m=+884.547049052" watchObservedRunningTime="2025-11-22 10:54:24.247685649 +0000 UTC m=+884.549290936" Nov 22 10:54:24 crc kubenswrapper[4926]: E1122 10:54:24.250108 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/glance-operator@sha256:d38faa9070da05487afdaa9e261ad39274c2ed862daf42efa460a040431f1991\\\"\"" pod="openstack-operators/glance-operator-controller-manager-7969689c84-qshq6" podUID="3ebbbdf8-da82-4f02-a8f5-509de3b56721" Nov 22 10:54:24 crc kubenswrapper[4926]: E1122 10:54:24.250240 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/manila-operator@sha256:b749a5dd8bc718875c3f5e81b38d54d003be77ab92de4a3e9f9595566496a58a\\\"\"" pod="openstack-operators/manila-operator-controller-manager-58f887965d-r2ctj" podUID="0996e99c-8565-426e-afa0-8a52ff2bee16" Nov 22 10:54:24 crc kubenswrapper[4926]: E1122 10:54:24.250324 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/ovn-operator@sha256:8ee0c0ec6eb84890fc3f6b1ee392fd34adf1720ad94e70e8969b213cce6045f0\\\"\"" pod="openstack-operators/ovn-operator-controller-manager-587df66445-2hwd8" podUID="355d4b1d-9137-4cf5-aac8-e373d1b7d696" Nov 22 10:54:24 crc kubenswrapper[4926]: E1122 10:54:24.250372 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/swift-operator@sha256:c0b5f124a37c1538042c0e63f0978429572e2a851d7f3a6eb80de09b86d755a0\\\"\"" pod="openstack-operators/swift-operator-controller-manager-d656998f4-j7xg8" podUID="e30ebbd3-daab-4ee4-acea-631c15b5045b" Nov 22 10:54:24 crc kubenswrapper[4926]: I1122 10:54:24.270636 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/ironic-operator-controller-manager-99b499f4-7j69z" podStartSLOduration=4.8082533210000005 podStartE2EDuration="19.270617483s" podCreationTimestamp="2025-11-22 10:54:05 +0000 UTC" firstStartedPulling="2025-11-22 10:54:07.391949841 +0000 UTC m=+867.693555128" lastFinishedPulling="2025-11-22 10:54:21.854314003 +0000 UTC m=+882.155919290" observedRunningTime="2025-11-22 10:54:24.269317446 +0000 UTC m=+884.570922733" watchObservedRunningTime="2025-11-22 10:54:24.270617483 +0000 UTC m=+884.572222770" Nov 22 10:54:24 crc kubenswrapper[4926]: I1122 10:54:24.306266 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/cinder-operator-controller-manager-6498cbf48f-cttxc" podStartSLOduration=4.812723559 podStartE2EDuration="19.30624385s" podCreationTimestamp="2025-11-22 10:54:05 +0000 UTC" firstStartedPulling="2025-11-22 10:54:07.336977763 +0000 UTC m=+867.638583050" lastFinishedPulling="2025-11-22 10:54:21.830498054 +0000 UTC m=+882.132103341" observedRunningTime="2025-11-22 10:54:24.299449866 +0000 UTC m=+884.601055173" watchObservedRunningTime="2025-11-22 10:54:24.30624385 +0000 UTC m=+884.607849147" Nov 22 10:54:24 crc kubenswrapper[4926]: I1122 10:54:24.433502 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/horizon-operator-controller-manager-598f69df5d-hls4w" 
podStartSLOduration=5.31107168 podStartE2EDuration="19.433487231s" podCreationTimestamp="2025-11-22 10:54:05 +0000 UTC" firstStartedPulling="2025-11-22 10:54:07.760296993 +0000 UTC m=+868.061902280" lastFinishedPulling="2025-11-22 10:54:21.882712554 +0000 UTC m=+882.184317831" observedRunningTime="2025-11-22 10:54:24.430626009 +0000 UTC m=+884.732231296" watchObservedRunningTime="2025-11-22 10:54:24.433487231 +0000 UTC m=+884.735092518" Nov 22 10:54:25 crc kubenswrapper[4926]: I1122 10:54:25.252976 4926 generic.go:334] "Generic (PLEG): container finished" podID="1062e5f6-8adb-46c7-bf38-872992e70102" containerID="5545ed5d6379fea5a0c62d851f7def11824222ca1aa4da9f17e1a95ad6d6b027" exitCode=0 Nov 22 10:54:25 crc kubenswrapper[4926]: I1122 10:54:25.253043 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-t96vk" event={"ID":"1062e5f6-8adb-46c7-bf38-872992e70102","Type":"ContainerDied","Data":"5545ed5d6379fea5a0c62d851f7def11824222ca1aa4da9f17e1a95ad6d6b027"} Nov 22 10:54:25 crc kubenswrapper[4926]: I1122 10:54:25.255393 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-controller-manager-7454b96578-dvdzj" event={"ID":"7e28261c-db91-4143-a418-1114acf60dc0","Type":"ContainerStarted","Data":"24cc93459c96e82da8a949bd4a53540aa4913cbfa6f204b5e6d1b0dc7bdaa79e"} Nov 22 10:54:25 crc kubenswrapper[4926]: I1122 10:54:25.263433 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-baremetal-operator-controller-manager-7cfbb8b596qtsss" event={"ID":"4c6f9a58-d6f5-426f-bb8d-e019401a015a","Type":"ContainerStarted","Data":"a900852a4bb9c653d9f49aceef92c22deae8854b465a84d52dd55c7cef750740"} Nov 22 10:54:25 crc kubenswrapper[4926]: I1122 10:54:25.304029 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-baremetal-operator-controller-manager-7cfbb8b596qtsss" podStartSLOduration=7.129043007 podStartE2EDuration="20.304012172s" podCreationTimestamp="2025-11-22 10:54:05 +0000 UTC" firstStartedPulling="2025-11-22 10:54:08.679558264 +0000 UTC m=+868.981163551" lastFinishedPulling="2025-11-22 10:54:21.854527429 +0000 UTC m=+882.156132716" observedRunningTime="2025-11-22 10:54:25.298592638 +0000 UTC m=+885.600197935" watchObservedRunningTime="2025-11-22 10:54:25.304012172 +0000 UTC m=+885.605617459" Nov 22 10:54:25 crc kubenswrapper[4926]: I1122 10:54:25.317959 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/keystone-operator-controller-manager-7454b96578-dvdzj" podStartSLOduration=6.253416721 podStartE2EDuration="20.31794325s" podCreationTimestamp="2025-11-22 10:54:05 +0000 UTC" firstStartedPulling="2025-11-22 10:54:07.775798115 +0000 UTC m=+868.077403412" lastFinishedPulling="2025-11-22 10:54:21.840324654 +0000 UTC m=+882.141929941" observedRunningTime="2025-11-22 10:54:25.316155699 +0000 UTC m=+885.617760986" watchObservedRunningTime="2025-11-22 10:54:25.31794325 +0000 UTC m=+885.619548537" Nov 22 10:54:26 crc kubenswrapper[4926]: I1122 10:54:26.270605 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/keystone-operator-controller-manager-7454b96578-dvdzj" Nov 22 10:54:26 crc kubenswrapper[4926]: I1122 10:54:26.271226 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-baremetal-operator-controller-manager-7cfbb8b596qtsss" Nov 22 10:54:28 crc kubenswrapper[4926]: I1122 10:54:28.128562 4926 
kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-baremetal-operator-controller-manager-7cfbb8b596qtsss" Nov 22 10:54:30 crc kubenswrapper[4926]: I1122 10:54:30.300280 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/octavia-operator-controller-manager-54cfbf4c7d-c5z8p" event={"ID":"71c1201e-62bb-4d32-945b-80cda1ff41ac","Type":"ContainerStarted","Data":"02a881e3b0fd57bd19eb2959c8c7db948e5fd8e14f725c5b85a5cb4bfabe6d9d"} Nov 22 10:54:30 crc kubenswrapper[4926]: I1122 10:54:30.300740 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/octavia-operator-controller-manager-54cfbf4c7d-c5z8p" Nov 22 10:54:30 crc kubenswrapper[4926]: I1122 10:54:30.302663 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-t96vk" event={"ID":"1062e5f6-8adb-46c7-bf38-872992e70102","Type":"ContainerStarted","Data":"e6dd440309bd34135bd3c50eaa8f6c73aa287fbf84ddce088baf3928750f6d63"} Nov 22 10:54:30 crc kubenswrapper[4926]: I1122 10:54:30.304438 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/placement-operator-controller-manager-5b797b8dff-pgzg9" event={"ID":"ec00fa84-7dd0-46d6-b9f2-4a7b687b347b","Type":"ContainerStarted","Data":"700fddf5008b5888fa1270bfe55417d73a8ceff2c8ba3d0883a234a7110f6158"} Nov 22 10:54:30 crc kubenswrapper[4926]: I1122 10:54:30.304617 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/placement-operator-controller-manager-5b797b8dff-pgzg9" Nov 22 10:54:30 crc kubenswrapper[4926]: I1122 10:54:30.306237 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/test-operator-controller-manager-6f44bf845f-7vhg5" event={"ID":"dab1442d-6ad4-4d03-b520-a12d7a4d6c9d","Type":"ContainerStarted","Data":"668bd57ae86e93b98087002239f4a902592a465509e493e090bc048f1564f6d2"} Nov 22 10:54:30 crc kubenswrapper[4926]: I1122 10:54:30.306416 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/test-operator-controller-manager-6f44bf845f-7vhg5" Nov 22 10:54:30 crc kubenswrapper[4926]: I1122 10:54:30.307747 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/rabbitmq-cluster-operator-manager-5f97d8c699-jdqsq" event={"ID":"3ab27f1b-e328-46d1-b9e5-b29e2caedef6","Type":"ContainerStarted","Data":"ac4136103491144066f7be11ebc43958b4d24432ac9b7cf9012853f79644c4ea"} Nov 22 10:54:30 crc kubenswrapper[4926]: I1122 10:54:30.309466 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-85494d54fc-czf4h" event={"ID":"3947549a-e067-4135-ba36-1e2663db15c0","Type":"ContainerStarted","Data":"5ca8ad7d089d6efe9b531f854ff23f300cd9991f35ea1f5312633152b3ad3a77"} Nov 22 10:54:30 crc kubenswrapper[4926]: I1122 10:54:30.309747 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/watcher-operator-controller-manager-85494d54fc-czf4h" Nov 22 10:54:30 crc kubenswrapper[4926]: I1122 10:54:30.312463 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/nova-operator-controller-manager-cfbb9c588-zjkmb" event={"ID":"8b039ede-62fc-47ed-83ed-672e756887a1","Type":"ContainerStarted","Data":"c6ba6b31ea93f758a0e883c304730a7d1355cf93c0c292d338dbe6e2f598ffc3"} Nov 22 10:54:30 crc kubenswrapper[4926]: I1122 10:54:30.312682 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" 
pod="openstack-operators/nova-operator-controller-manager-cfbb9c588-zjkmb" Nov 22 10:54:30 crc kubenswrapper[4926]: I1122 10:54:30.323723 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/octavia-operator-controller-manager-54cfbf4c7d-c5z8p" podStartSLOduration=4.132088617 podStartE2EDuration="25.323703962s" podCreationTimestamp="2025-11-22 10:54:05 +0000 UTC" firstStartedPulling="2025-11-22 10:54:07.896732016 +0000 UTC m=+868.198337303" lastFinishedPulling="2025-11-22 10:54:29.088347361 +0000 UTC m=+889.389952648" observedRunningTime="2025-11-22 10:54:30.323015752 +0000 UTC m=+890.624621039" watchObservedRunningTime="2025-11-22 10:54:30.323703962 +0000 UTC m=+890.625309249" Nov 22 10:54:30 crc kubenswrapper[4926]: I1122 10:54:30.340228 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/watcher-operator-controller-manager-85494d54fc-czf4h" podStartSLOduration=3.489750201 podStartE2EDuration="24.340207412s" podCreationTimestamp="2025-11-22 10:54:06 +0000 UTC" firstStartedPulling="2025-11-22 10:54:08.238170428 +0000 UTC m=+868.539775715" lastFinishedPulling="2025-11-22 10:54:29.088627639 +0000 UTC m=+889.390232926" observedRunningTime="2025-11-22 10:54:30.335426526 +0000 UTC m=+890.637031813" watchObservedRunningTime="2025-11-22 10:54:30.340207412 +0000 UTC m=+890.641812689" Nov 22 10:54:30 crc kubenswrapper[4926]: I1122 10:54:30.362599 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/nova-operator-controller-manager-cfbb9c588-zjkmb" podStartSLOduration=4.154122246 podStartE2EDuration="25.362572791s" podCreationTimestamp="2025-11-22 10:54:05 +0000 UTC" firstStartedPulling="2025-11-22 10:54:07.882131209 +0000 UTC m=+868.183736496" lastFinishedPulling="2025-11-22 10:54:29.090581754 +0000 UTC m=+889.392187041" observedRunningTime="2025-11-22 10:54:30.361392627 +0000 UTC m=+890.662997924" watchObservedRunningTime="2025-11-22 10:54:30.362572791 +0000 UTC m=+890.664178078" Nov 22 10:54:30 crc kubenswrapper[4926]: I1122 10:54:30.379693 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/rabbitmq-cluster-operator-manager-5f97d8c699-jdqsq" podStartSLOduration=3.416145002 podStartE2EDuration="24.379670059s" podCreationTimestamp="2025-11-22 10:54:06 +0000 UTC" firstStartedPulling="2025-11-22 10:54:08.194014798 +0000 UTC m=+868.495620085" lastFinishedPulling="2025-11-22 10:54:29.157539855 +0000 UTC m=+889.459145142" observedRunningTime="2025-11-22 10:54:30.376109477 +0000 UTC m=+890.677714774" watchObservedRunningTime="2025-11-22 10:54:30.379670059 +0000 UTC m=+890.681275346" Nov 22 10:54:30 crc kubenswrapper[4926]: I1122 10:54:30.401563 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-t96vk" podStartSLOduration=4.378322982 podStartE2EDuration="9.401545833s" podCreationTimestamp="2025-11-22 10:54:21 +0000 UTC" firstStartedPulling="2025-11-22 10:54:24.118253406 +0000 UTC m=+884.419858703" lastFinishedPulling="2025-11-22 10:54:29.141476267 +0000 UTC m=+889.443081554" observedRunningTime="2025-11-22 10:54:30.398416404 +0000 UTC m=+890.700021731" watchObservedRunningTime="2025-11-22 10:54:30.401545833 +0000 UTC m=+890.703151110" Nov 22 10:54:30 crc kubenswrapper[4926]: I1122 10:54:30.414418 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/placement-operator-controller-manager-5b797b8dff-pgzg9" 
podStartSLOduration=4.186269823 podStartE2EDuration="25.414391259s" podCreationTimestamp="2025-11-22 10:54:05 +0000 UTC" firstStartedPulling="2025-11-22 10:54:07.907622807 +0000 UTC m=+868.209228094" lastFinishedPulling="2025-11-22 10:54:29.135744243 +0000 UTC m=+889.437349530" observedRunningTime="2025-11-22 10:54:30.411660781 +0000 UTC m=+890.713266078" watchObservedRunningTime="2025-11-22 10:54:30.414391259 +0000 UTC m=+890.715996596" Nov 22 10:54:30 crc kubenswrapper[4926]: I1122 10:54:30.434154 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/test-operator-controller-manager-6f44bf845f-7vhg5" podStartSLOduration=3.4882550390000002 podStartE2EDuration="24.434129113s" podCreationTimestamp="2025-11-22 10:54:06 +0000 UTC" firstStartedPulling="2025-11-22 10:54:08.193638047 +0000 UTC m=+868.495243334" lastFinishedPulling="2025-11-22 10:54:29.139512121 +0000 UTC m=+889.441117408" observedRunningTime="2025-11-22 10:54:30.429063158 +0000 UTC m=+890.730668455" watchObservedRunningTime="2025-11-22 10:54:30.434129113 +0000 UTC m=+890.735734400" Nov 22 10:54:32 crc kubenswrapper[4926]: I1122 10:54:32.060701 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-t96vk" Nov 22 10:54:32 crc kubenswrapper[4926]: I1122 10:54:32.061070 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-t96vk" Nov 22 10:54:32 crc kubenswrapper[4926]: I1122 10:54:32.125845 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-t96vk" Nov 22 10:54:35 crc kubenswrapper[4926]: I1122 10:54:35.354836 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/glance-operator-controller-manager-7969689c84-qshq6" event={"ID":"3ebbbdf8-da82-4f02-a8f5-509de3b56721","Type":"ContainerStarted","Data":"f7492084a44277460526386619fcee98d14b26e70d4426c9bf42dc196e623cb1"} Nov 22 10:54:35 crc kubenswrapper[4926]: I1122 10:54:35.358322 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/glance-operator-controller-manager-7969689c84-qshq6" Nov 22 10:54:35 crc kubenswrapper[4926]: I1122 10:54:35.378765 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/glance-operator-controller-manager-7969689c84-qshq6" podStartSLOduration=3.118756071 podStartE2EDuration="30.37874246s" podCreationTimestamp="2025-11-22 10:54:05 +0000 UTC" firstStartedPulling="2025-11-22 10:54:07.762596378 +0000 UTC m=+868.064201665" lastFinishedPulling="2025-11-22 10:54:35.022582767 +0000 UTC m=+895.324188054" observedRunningTime="2025-11-22 10:54:35.37348808 +0000 UTC m=+895.675093407" watchObservedRunningTime="2025-11-22 10:54:35.37874246 +0000 UTC m=+895.680347757" Nov 22 10:54:36 crc kubenswrapper[4926]: I1122 10:54:36.064820 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/barbican-operator-controller-manager-697c78f669-dfq9w" Nov 22 10:54:36 crc kubenswrapper[4926]: I1122 10:54:36.089012 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/cinder-operator-controller-manager-6498cbf48f-cttxc" Nov 22 10:54:36 crc kubenswrapper[4926]: I1122 10:54:36.131813 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/designate-operator-controller-manager-767ccfd65f-8p4kn" Nov 22 10:54:36 crc 
kubenswrapper[4926]: I1122 10:54:36.162386 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/heat-operator-controller-manager-7869d7c46b-np8cn" Nov 22 10:54:36 crc kubenswrapper[4926]: I1122 10:54:36.186351 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/horizon-operator-controller-manager-598f69df5d-hls4w" Nov 22 10:54:36 crc kubenswrapper[4926]: I1122 10:54:36.234740 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/ironic-operator-controller-manager-99b499f4-7j69z" Nov 22 10:54:36 crc kubenswrapper[4926]: I1122 10:54:36.262249 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/neutron-operator-controller-manager-669dc6ff5f-crkkv" Nov 22 10:54:36 crc kubenswrapper[4926]: I1122 10:54:36.364723 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/swift-operator-controller-manager-d656998f4-j7xg8" event={"ID":"e30ebbd3-daab-4ee4-acea-631c15b5045b","Type":"ContainerStarted","Data":"9720a2d3e966bb958456e08e7148f996989ac3fa1a0ec64d61ee5fe8d31af26e"} Nov 22 10:54:36 crc kubenswrapper[4926]: I1122 10:54:36.365071 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/swift-operator-controller-manager-d656998f4-j7xg8" Nov 22 10:54:36 crc kubenswrapper[4926]: I1122 10:54:36.384541 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/swift-operator-controller-manager-d656998f4-j7xg8" podStartSLOduration=2.411587887 podStartE2EDuration="30.384521731s" podCreationTimestamp="2025-11-22 10:54:06 +0000 UTC" firstStartedPulling="2025-11-22 10:54:08.159090022 +0000 UTC m=+868.460695309" lastFinishedPulling="2025-11-22 10:54:36.132023866 +0000 UTC m=+896.433629153" observedRunningTime="2025-11-22 10:54:36.382272217 +0000 UTC m=+896.683877514" watchObservedRunningTime="2025-11-22 10:54:36.384521731 +0000 UTC m=+896.686127018" Nov 22 10:54:36 crc kubenswrapper[4926]: I1122 10:54:36.401981 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/keystone-operator-controller-manager-7454b96578-dvdzj" Nov 22 10:54:36 crc kubenswrapper[4926]: I1122 10:54:36.513283 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/infra-operator-controller-manager-7875d8bb94-pr7tn" Nov 22 10:54:36 crc kubenswrapper[4926]: I1122 10:54:36.513566 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/mariadb-operator-controller-manager-5f449d8fbc-bfqxw" Nov 22 10:54:36 crc kubenswrapper[4926]: I1122 10:54:36.570532 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/nova-operator-controller-manager-cfbb9c588-zjkmb" Nov 22 10:54:36 crc kubenswrapper[4926]: I1122 10:54:36.620063 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/octavia-operator-controller-manager-54cfbf4c7d-c5z8p" Nov 22 10:54:36 crc kubenswrapper[4926]: I1122 10:54:36.669576 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/placement-operator-controller-manager-5b797b8dff-pgzg9" Nov 22 10:54:36 crc kubenswrapper[4926]: I1122 10:54:36.709531 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" 
pod="openstack-operators/telemetry-operator-controller-manager-6d4bf84b58-95jv5" Nov 22 10:54:36 crc kubenswrapper[4926]: I1122 10:54:36.877796 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/test-operator-controller-manager-6f44bf845f-7vhg5" Nov 22 10:54:37 crc kubenswrapper[4926]: I1122 10:54:37.105942 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/watcher-operator-controller-manager-85494d54fc-czf4h" Nov 22 10:54:42 crc kubenswrapper[4926]: I1122 10:54:42.105654 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-t96vk" Nov 22 10:54:42 crc kubenswrapper[4926]: I1122 10:54:42.154130 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-t96vk"] Nov 22 10:54:43 crc kubenswrapper[4926]: I1122 10:54:43.091525 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-t96vk" podUID="1062e5f6-8adb-46c7-bf38-872992e70102" containerName="registry-server" containerID="cri-o://e6dd440309bd34135bd3c50eaa8f6c73aa287fbf84ddce088baf3928750f6d63" gracePeriod=2 Nov 22 10:54:45 crc kubenswrapper[4926]: I1122 10:54:45.110673 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/manila-operator-controller-manager-58f887965d-r2ctj" event={"ID":"0996e99c-8565-426e-afa0-8a52ff2bee16","Type":"ContainerStarted","Data":"8c112b82e29a8cc595eabef7faf64c044f6f97bb21c8c6874ba0e8692adedca9"} Nov 22 10:54:45 crc kubenswrapper[4926]: I1122 10:54:45.111559 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/manila-operator-controller-manager-58f887965d-r2ctj" Nov 22 10:54:45 crc kubenswrapper[4926]: I1122 10:54:45.116732 4926 generic.go:334] "Generic (PLEG): container finished" podID="1062e5f6-8adb-46c7-bf38-872992e70102" containerID="e6dd440309bd34135bd3c50eaa8f6c73aa287fbf84ddce088baf3928750f6d63" exitCode=0 Nov 22 10:54:45 crc kubenswrapper[4926]: I1122 10:54:45.116788 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-t96vk" event={"ID":"1062e5f6-8adb-46c7-bf38-872992e70102","Type":"ContainerDied","Data":"e6dd440309bd34135bd3c50eaa8f6c73aa287fbf84ddce088baf3928750f6d63"} Nov 22 10:54:45 crc kubenswrapper[4926]: I1122 10:54:45.119020 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ovn-operator-controller-manager-587df66445-2hwd8" event={"ID":"355d4b1d-9137-4cf5-aac8-e373d1b7d696","Type":"ContainerStarted","Data":"3070817e97b59ec398519201dccbec63fcedd5c89a45be79d7558112da948b64"} Nov 22 10:54:45 crc kubenswrapper[4926]: I1122 10:54:45.119399 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/ovn-operator-controller-manager-587df66445-2hwd8" Nov 22 10:54:45 crc kubenswrapper[4926]: I1122 10:54:45.132174 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/manila-operator-controller-manager-58f887965d-r2ctj" podStartSLOduration=3.129370464 podStartE2EDuration="40.1321514s" podCreationTimestamp="2025-11-22 10:54:05 +0000 UTC" firstStartedPulling="2025-11-22 10:54:07.758268675 +0000 UTC m=+868.059873962" lastFinishedPulling="2025-11-22 10:54:44.761049611 +0000 UTC m=+905.062654898" observedRunningTime="2025-11-22 10:54:45.125607614 +0000 UTC m=+905.427212911" watchObservedRunningTime="2025-11-22 
10:54:45.1321514 +0000 UTC m=+905.433756687" Nov 22 10:54:45 crc kubenswrapper[4926]: I1122 10:54:45.149578 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/ovn-operator-controller-manager-587df66445-2hwd8" podStartSLOduration=3.266209979 podStartE2EDuration="40.149549007s" podCreationTimestamp="2025-11-22 10:54:05 +0000 UTC" firstStartedPulling="2025-11-22 10:54:07.878451634 +0000 UTC m=+868.180056921" lastFinishedPulling="2025-11-22 10:54:44.761790662 +0000 UTC m=+905.063395949" observedRunningTime="2025-11-22 10:54:45.139603113 +0000 UTC m=+905.441208390" watchObservedRunningTime="2025-11-22 10:54:45.149549007 +0000 UTC m=+905.451154324" Nov 22 10:54:45 crc kubenswrapper[4926]: I1122 10:54:45.257282 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-t96vk" Nov 22 10:54:45 crc kubenswrapper[4926]: I1122 10:54:45.443830 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7hnb5\" (UniqueName: \"kubernetes.io/projected/1062e5f6-8adb-46c7-bf38-872992e70102-kube-api-access-7hnb5\") pod \"1062e5f6-8adb-46c7-bf38-872992e70102\" (UID: \"1062e5f6-8adb-46c7-bf38-872992e70102\") " Nov 22 10:54:45 crc kubenswrapper[4926]: I1122 10:54:45.443876 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1062e5f6-8adb-46c7-bf38-872992e70102-catalog-content\") pod \"1062e5f6-8adb-46c7-bf38-872992e70102\" (UID: \"1062e5f6-8adb-46c7-bf38-872992e70102\") " Nov 22 10:54:45 crc kubenswrapper[4926]: I1122 10:54:45.443923 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1062e5f6-8adb-46c7-bf38-872992e70102-utilities\") pod \"1062e5f6-8adb-46c7-bf38-872992e70102\" (UID: \"1062e5f6-8adb-46c7-bf38-872992e70102\") " Nov 22 10:54:45 crc kubenswrapper[4926]: I1122 10:54:45.445045 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1062e5f6-8adb-46c7-bf38-872992e70102-utilities" (OuterVolumeSpecName: "utilities") pod "1062e5f6-8adb-46c7-bf38-872992e70102" (UID: "1062e5f6-8adb-46c7-bf38-872992e70102"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 10:54:45 crc kubenswrapper[4926]: I1122 10:54:45.457176 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1062e5f6-8adb-46c7-bf38-872992e70102-kube-api-access-7hnb5" (OuterVolumeSpecName: "kube-api-access-7hnb5") pod "1062e5f6-8adb-46c7-bf38-872992e70102" (UID: "1062e5f6-8adb-46c7-bf38-872992e70102"). InnerVolumeSpecName "kube-api-access-7hnb5". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:54:45 crc kubenswrapper[4926]: I1122 10:54:45.491035 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1062e5f6-8adb-46c7-bf38-872992e70102-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "1062e5f6-8adb-46c7-bf38-872992e70102" (UID: "1062e5f6-8adb-46c7-bf38-872992e70102"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 10:54:45 crc kubenswrapper[4926]: I1122 10:54:45.545297 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7hnb5\" (UniqueName: \"kubernetes.io/projected/1062e5f6-8adb-46c7-bf38-872992e70102-kube-api-access-7hnb5\") on node \"crc\" DevicePath \"\"" Nov 22 10:54:45 crc kubenswrapper[4926]: I1122 10:54:45.545329 4926 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1062e5f6-8adb-46c7-bf38-872992e70102-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 22 10:54:45 crc kubenswrapper[4926]: I1122 10:54:45.545338 4926 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1062e5f6-8adb-46c7-bf38-872992e70102-utilities\") on node \"crc\" DevicePath \"\"" Nov 22 10:54:46 crc kubenswrapper[4926]: I1122 10:54:46.128424 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-t96vk" event={"ID":"1062e5f6-8adb-46c7-bf38-872992e70102","Type":"ContainerDied","Data":"053e0f9409bcf302c9e627dbfb3afbcc1d59ca34821af02b5b22d1d16abb8092"} Nov 22 10:54:46 crc kubenswrapper[4926]: I1122 10:54:46.128473 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-t96vk" Nov 22 10:54:46 crc kubenswrapper[4926]: I1122 10:54:46.128488 4926 scope.go:117] "RemoveContainer" containerID="e6dd440309bd34135bd3c50eaa8f6c73aa287fbf84ddce088baf3928750f6d63" Nov 22 10:54:46 crc kubenswrapper[4926]: I1122 10:54:46.135474 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/glance-operator-controller-manager-7969689c84-qshq6" Nov 22 10:54:46 crc kubenswrapper[4926]: I1122 10:54:46.147503 4926 scope.go:117] "RemoveContainer" containerID="5545ed5d6379fea5a0c62d851f7def11824222ca1aa4da9f17e1a95ad6d6b027" Nov 22 10:54:46 crc kubenswrapper[4926]: I1122 10:54:46.179731 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-t96vk"] Nov 22 10:54:46 crc kubenswrapper[4926]: I1122 10:54:46.186552 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-t96vk"] Nov 22 10:54:46 crc kubenswrapper[4926]: I1122 10:54:46.189459 4926 scope.go:117] "RemoveContainer" containerID="19651149505e537358620d38f7df48b729baea6be93550859dc8c5287f81e67e" Nov 22 10:54:46 crc kubenswrapper[4926]: I1122 10:54:46.593358 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1062e5f6-8adb-46c7-bf38-872992e70102" path="/var/lib/kubelet/pods/1062e5f6-8adb-46c7-bf38-872992e70102/volumes" Nov 22 10:54:46 crc kubenswrapper[4926]: I1122 10:54:46.690210 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/swift-operator-controller-manager-d656998f4-j7xg8" Nov 22 10:54:56 crc kubenswrapper[4926]: I1122 10:54:56.489155 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/manila-operator-controller-manager-58f887965d-r2ctj" Nov 22 10:54:56 crc kubenswrapper[4926]: I1122 10:54:56.677423 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/ovn-operator-controller-manager-587df66445-2hwd8" Nov 22 10:55:09 crc kubenswrapper[4926]: I1122 10:55:09.661447 4926 patch_prober.go:28] interesting pod/machine-config-daemon-xr9nd container/machine-config-daemon 
namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 22 10:55:09 crc kubenswrapper[4926]: I1122 10:55:09.662519 4926 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" podUID="d4977b14-85c3-4141-9b15-1768f09e8d27" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 22 10:55:20 crc kubenswrapper[4926]: I1122 10:55:20.257476 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-5nzzp"] Nov 22 10:55:20 crc kubenswrapper[4926]: E1122 10:55:20.262646 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1062e5f6-8adb-46c7-bf38-872992e70102" containerName="extract-content" Nov 22 10:55:20 crc kubenswrapper[4926]: I1122 10:55:20.262733 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="1062e5f6-8adb-46c7-bf38-872992e70102" containerName="extract-content" Nov 22 10:55:20 crc kubenswrapper[4926]: E1122 10:55:20.262813 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1062e5f6-8adb-46c7-bf38-872992e70102" containerName="registry-server" Nov 22 10:55:20 crc kubenswrapper[4926]: I1122 10:55:20.262879 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="1062e5f6-8adb-46c7-bf38-872992e70102" containerName="registry-server" Nov 22 10:55:20 crc kubenswrapper[4926]: E1122 10:55:20.262981 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1062e5f6-8adb-46c7-bf38-872992e70102" containerName="extract-utilities" Nov 22 10:55:20 crc kubenswrapper[4926]: I1122 10:55:20.263043 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="1062e5f6-8adb-46c7-bf38-872992e70102" containerName="extract-utilities" Nov 22 10:55:20 crc kubenswrapper[4926]: I1122 10:55:20.263268 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="1062e5f6-8adb-46c7-bf38-872992e70102" containerName="registry-server" Nov 22 10:55:20 crc kubenswrapper[4926]: I1122 10:55:20.264295 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-675f4bcbfc-5nzzp" Nov 22 10:55:20 crc kubenswrapper[4926]: I1122 10:55:20.267390 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-5nzzp"] Nov 22 10:55:20 crc kubenswrapper[4926]: I1122 10:55:20.271670 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dnsmasq-dns-dockercfg-rwgpp" Nov 22 10:55:20 crc kubenswrapper[4926]: I1122 10:55:20.271834 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openshift-service-ca.crt" Nov 22 10:55:20 crc kubenswrapper[4926]: I1122 10:55:20.271839 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"kube-root-ca.crt" Nov 22 10:55:20 crc kubenswrapper[4926]: I1122 10:55:20.271705 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"dns" Nov 22 10:55:20 crc kubenswrapper[4926]: I1122 10:55:20.319837 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-b7srl"] Nov 22 10:55:20 crc kubenswrapper[4926]: I1122 10:55:20.320912 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-78dd6ddcc-b7srl" Nov 22 10:55:20 crc kubenswrapper[4926]: I1122 10:55:20.323514 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"dns-svc" Nov 22 10:55:20 crc kubenswrapper[4926]: I1122 10:55:20.329992 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-b7srl"] Nov 22 10:55:20 crc kubenswrapper[4926]: I1122 10:55:20.450446 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f87dt\" (UniqueName: \"kubernetes.io/projected/5b1fdcd2-3cc9-4dc1-949e-7189999a0f53-kube-api-access-f87dt\") pod \"dnsmasq-dns-78dd6ddcc-b7srl\" (UID: \"5b1fdcd2-3cc9-4dc1-949e-7189999a0f53\") " pod="openstack/dnsmasq-dns-78dd6ddcc-b7srl" Nov 22 10:55:20 crc kubenswrapper[4926]: I1122 10:55:20.450541 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/5b1fdcd2-3cc9-4dc1-949e-7189999a0f53-dns-svc\") pod \"dnsmasq-dns-78dd6ddcc-b7srl\" (UID: \"5b1fdcd2-3cc9-4dc1-949e-7189999a0f53\") " pod="openstack/dnsmasq-dns-78dd6ddcc-b7srl" Nov 22 10:55:20 crc kubenswrapper[4926]: I1122 10:55:20.450580 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a7cfb8ad-ae0a-413d-a2fd-f7697aaca2fe-config\") pod \"dnsmasq-dns-675f4bcbfc-5nzzp\" (UID: \"a7cfb8ad-ae0a-413d-a2fd-f7697aaca2fe\") " pod="openstack/dnsmasq-dns-675f4bcbfc-5nzzp" Nov 22 10:55:20 crc kubenswrapper[4926]: I1122 10:55:20.450616 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8lsmr\" (UniqueName: \"kubernetes.io/projected/a7cfb8ad-ae0a-413d-a2fd-f7697aaca2fe-kube-api-access-8lsmr\") pod \"dnsmasq-dns-675f4bcbfc-5nzzp\" (UID: \"a7cfb8ad-ae0a-413d-a2fd-f7697aaca2fe\") " pod="openstack/dnsmasq-dns-675f4bcbfc-5nzzp" Nov 22 10:55:20 crc kubenswrapper[4926]: I1122 10:55:20.450794 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5b1fdcd2-3cc9-4dc1-949e-7189999a0f53-config\") pod \"dnsmasq-dns-78dd6ddcc-b7srl\" (UID: \"5b1fdcd2-3cc9-4dc1-949e-7189999a0f53\") " pod="openstack/dnsmasq-dns-78dd6ddcc-b7srl" Nov 22 10:55:20 crc kubenswrapper[4926]: I1122 10:55:20.552481 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/5b1fdcd2-3cc9-4dc1-949e-7189999a0f53-dns-svc\") pod \"dnsmasq-dns-78dd6ddcc-b7srl\" (UID: \"5b1fdcd2-3cc9-4dc1-949e-7189999a0f53\") " pod="openstack/dnsmasq-dns-78dd6ddcc-b7srl" Nov 22 10:55:20 crc kubenswrapper[4926]: I1122 10:55:20.552535 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a7cfb8ad-ae0a-413d-a2fd-f7697aaca2fe-config\") pod \"dnsmasq-dns-675f4bcbfc-5nzzp\" (UID: \"a7cfb8ad-ae0a-413d-a2fd-f7697aaca2fe\") " pod="openstack/dnsmasq-dns-675f4bcbfc-5nzzp" Nov 22 10:55:20 crc kubenswrapper[4926]: I1122 10:55:20.552578 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8lsmr\" (UniqueName: \"kubernetes.io/projected/a7cfb8ad-ae0a-413d-a2fd-f7697aaca2fe-kube-api-access-8lsmr\") pod \"dnsmasq-dns-675f4bcbfc-5nzzp\" (UID: \"a7cfb8ad-ae0a-413d-a2fd-f7697aaca2fe\") " pod="openstack/dnsmasq-dns-675f4bcbfc-5nzzp" 
Nov 22 10:55:20 crc kubenswrapper[4926]: I1122 10:55:20.552613 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5b1fdcd2-3cc9-4dc1-949e-7189999a0f53-config\") pod \"dnsmasq-dns-78dd6ddcc-b7srl\" (UID: \"5b1fdcd2-3cc9-4dc1-949e-7189999a0f53\") " pod="openstack/dnsmasq-dns-78dd6ddcc-b7srl" Nov 22 10:55:20 crc kubenswrapper[4926]: I1122 10:55:20.552638 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-f87dt\" (UniqueName: \"kubernetes.io/projected/5b1fdcd2-3cc9-4dc1-949e-7189999a0f53-kube-api-access-f87dt\") pod \"dnsmasq-dns-78dd6ddcc-b7srl\" (UID: \"5b1fdcd2-3cc9-4dc1-949e-7189999a0f53\") " pod="openstack/dnsmasq-dns-78dd6ddcc-b7srl" Nov 22 10:55:20 crc kubenswrapper[4926]: I1122 10:55:20.553498 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/5b1fdcd2-3cc9-4dc1-949e-7189999a0f53-dns-svc\") pod \"dnsmasq-dns-78dd6ddcc-b7srl\" (UID: \"5b1fdcd2-3cc9-4dc1-949e-7189999a0f53\") " pod="openstack/dnsmasq-dns-78dd6ddcc-b7srl" Nov 22 10:55:20 crc kubenswrapper[4926]: I1122 10:55:20.553569 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a7cfb8ad-ae0a-413d-a2fd-f7697aaca2fe-config\") pod \"dnsmasq-dns-675f4bcbfc-5nzzp\" (UID: \"a7cfb8ad-ae0a-413d-a2fd-f7697aaca2fe\") " pod="openstack/dnsmasq-dns-675f4bcbfc-5nzzp" Nov 22 10:55:20 crc kubenswrapper[4926]: I1122 10:55:20.553813 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5b1fdcd2-3cc9-4dc1-949e-7189999a0f53-config\") pod \"dnsmasq-dns-78dd6ddcc-b7srl\" (UID: \"5b1fdcd2-3cc9-4dc1-949e-7189999a0f53\") " pod="openstack/dnsmasq-dns-78dd6ddcc-b7srl" Nov 22 10:55:20 crc kubenswrapper[4926]: I1122 10:55:20.574765 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8lsmr\" (UniqueName: \"kubernetes.io/projected/a7cfb8ad-ae0a-413d-a2fd-f7697aaca2fe-kube-api-access-8lsmr\") pod \"dnsmasq-dns-675f4bcbfc-5nzzp\" (UID: \"a7cfb8ad-ae0a-413d-a2fd-f7697aaca2fe\") " pod="openstack/dnsmasq-dns-675f4bcbfc-5nzzp" Nov 22 10:55:20 crc kubenswrapper[4926]: I1122 10:55:20.574797 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-f87dt\" (UniqueName: \"kubernetes.io/projected/5b1fdcd2-3cc9-4dc1-949e-7189999a0f53-kube-api-access-f87dt\") pod \"dnsmasq-dns-78dd6ddcc-b7srl\" (UID: \"5b1fdcd2-3cc9-4dc1-949e-7189999a0f53\") " pod="openstack/dnsmasq-dns-78dd6ddcc-b7srl" Nov 22 10:55:20 crc kubenswrapper[4926]: I1122 10:55:20.587329 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-675f4bcbfc-5nzzp" Nov 22 10:55:20 crc kubenswrapper[4926]: I1122 10:55:20.634241 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-78dd6ddcc-b7srl" Nov 22 10:55:20 crc kubenswrapper[4926]: I1122 10:55:20.870521 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-b7srl"] Nov 22 10:55:20 crc kubenswrapper[4926]: I1122 10:55:20.881912 4926 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 22 10:55:21 crc kubenswrapper[4926]: I1122 10:55:21.001465 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-5nzzp"] Nov 22 10:55:21 crc kubenswrapper[4926]: W1122 10:55:21.012104 4926 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda7cfb8ad_ae0a_413d_a2fd_f7697aaca2fe.slice/crio-54f38416b2b44e5a81885fc4c160cd6cefa7a25c0d11eb04ff50bc02952438de WatchSource:0}: Error finding container 54f38416b2b44e5a81885fc4c160cd6cefa7a25c0d11eb04ff50bc02952438de: Status 404 returned error can't find the container with id 54f38416b2b44e5a81885fc4c160cd6cefa7a25c0d11eb04ff50bc02952438de Nov 22 10:55:21 crc kubenswrapper[4926]: I1122 10:55:21.430489 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-675f4bcbfc-5nzzp" event={"ID":"a7cfb8ad-ae0a-413d-a2fd-f7697aaca2fe","Type":"ContainerStarted","Data":"54f38416b2b44e5a81885fc4c160cd6cefa7a25c0d11eb04ff50bc02952438de"} Nov 22 10:55:21 crc kubenswrapper[4926]: I1122 10:55:21.432467 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-78dd6ddcc-b7srl" event={"ID":"5b1fdcd2-3cc9-4dc1-949e-7189999a0f53","Type":"ContainerStarted","Data":"cd362712c21ac6765b547392e34362f6be34bcf1055d552a7e8ba2025f5b7036"} Nov 22 10:55:23 crc kubenswrapper[4926]: I1122 10:55:23.046461 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-5nzzp"] Nov 22 10:55:23 crc kubenswrapper[4926]: I1122 10:55:23.077047 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-5ccc8479f9-s7wk2"] Nov 22 10:55:23 crc kubenswrapper[4926]: I1122 10:55:23.092644 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5ccc8479f9-s7wk2" Nov 22 10:55:23 crc kubenswrapper[4926]: I1122 10:55:23.093686 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5ccc8479f9-s7wk2"] Nov 22 10:55:23 crc kubenswrapper[4926]: I1122 10:55:23.193244 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-956x5\" (UniqueName: \"kubernetes.io/projected/99e8179f-b7fe-402c-bbb6-fcb8b0ab1abb-kube-api-access-956x5\") pod \"dnsmasq-dns-5ccc8479f9-s7wk2\" (UID: \"99e8179f-b7fe-402c-bbb6-fcb8b0ab1abb\") " pod="openstack/dnsmasq-dns-5ccc8479f9-s7wk2" Nov 22 10:55:23 crc kubenswrapper[4926]: I1122 10:55:23.193291 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/99e8179f-b7fe-402c-bbb6-fcb8b0ab1abb-config\") pod \"dnsmasq-dns-5ccc8479f9-s7wk2\" (UID: \"99e8179f-b7fe-402c-bbb6-fcb8b0ab1abb\") " pod="openstack/dnsmasq-dns-5ccc8479f9-s7wk2" Nov 22 10:55:23 crc kubenswrapper[4926]: I1122 10:55:23.193354 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/99e8179f-b7fe-402c-bbb6-fcb8b0ab1abb-dns-svc\") pod \"dnsmasq-dns-5ccc8479f9-s7wk2\" (UID: \"99e8179f-b7fe-402c-bbb6-fcb8b0ab1abb\") " pod="openstack/dnsmasq-dns-5ccc8479f9-s7wk2" Nov 22 10:55:23 crc kubenswrapper[4926]: I1122 10:55:23.296033 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-956x5\" (UniqueName: \"kubernetes.io/projected/99e8179f-b7fe-402c-bbb6-fcb8b0ab1abb-kube-api-access-956x5\") pod \"dnsmasq-dns-5ccc8479f9-s7wk2\" (UID: \"99e8179f-b7fe-402c-bbb6-fcb8b0ab1abb\") " pod="openstack/dnsmasq-dns-5ccc8479f9-s7wk2" Nov 22 10:55:23 crc kubenswrapper[4926]: I1122 10:55:23.296078 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/99e8179f-b7fe-402c-bbb6-fcb8b0ab1abb-config\") pod \"dnsmasq-dns-5ccc8479f9-s7wk2\" (UID: \"99e8179f-b7fe-402c-bbb6-fcb8b0ab1abb\") " pod="openstack/dnsmasq-dns-5ccc8479f9-s7wk2" Nov 22 10:55:23 crc kubenswrapper[4926]: I1122 10:55:23.296112 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/99e8179f-b7fe-402c-bbb6-fcb8b0ab1abb-dns-svc\") pod \"dnsmasq-dns-5ccc8479f9-s7wk2\" (UID: \"99e8179f-b7fe-402c-bbb6-fcb8b0ab1abb\") " pod="openstack/dnsmasq-dns-5ccc8479f9-s7wk2" Nov 22 10:55:23 crc kubenswrapper[4926]: I1122 10:55:23.297024 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/99e8179f-b7fe-402c-bbb6-fcb8b0ab1abb-dns-svc\") pod \"dnsmasq-dns-5ccc8479f9-s7wk2\" (UID: \"99e8179f-b7fe-402c-bbb6-fcb8b0ab1abb\") " pod="openstack/dnsmasq-dns-5ccc8479f9-s7wk2" Nov 22 10:55:23 crc kubenswrapper[4926]: I1122 10:55:23.297734 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/99e8179f-b7fe-402c-bbb6-fcb8b0ab1abb-config\") pod \"dnsmasq-dns-5ccc8479f9-s7wk2\" (UID: \"99e8179f-b7fe-402c-bbb6-fcb8b0ab1abb\") " pod="openstack/dnsmasq-dns-5ccc8479f9-s7wk2" Nov 22 10:55:23 crc kubenswrapper[4926]: I1122 10:55:23.316222 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-956x5\" (UniqueName: 
\"kubernetes.io/projected/99e8179f-b7fe-402c-bbb6-fcb8b0ab1abb-kube-api-access-956x5\") pod \"dnsmasq-dns-5ccc8479f9-s7wk2\" (UID: \"99e8179f-b7fe-402c-bbb6-fcb8b0ab1abb\") " pod="openstack/dnsmasq-dns-5ccc8479f9-s7wk2" Nov 22 10:55:23 crc kubenswrapper[4926]: I1122 10:55:23.322091 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-b7srl"] Nov 22 10:55:23 crc kubenswrapper[4926]: I1122 10:55:23.373955 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-22bgs"] Nov 22 10:55:23 crc kubenswrapper[4926]: I1122 10:55:23.375214 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-57d769cc4f-22bgs" Nov 22 10:55:23 crc kubenswrapper[4926]: I1122 10:55:23.379522 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-22bgs"] Nov 22 10:55:23 crc kubenswrapper[4926]: I1122 10:55:23.424443 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5ccc8479f9-s7wk2" Nov 22 10:55:23 crc kubenswrapper[4926]: I1122 10:55:23.501001 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/62b8478a-cfa9-4952-a402-bc004da81057-config\") pod \"dnsmasq-dns-57d769cc4f-22bgs\" (UID: \"62b8478a-cfa9-4952-a402-bc004da81057\") " pod="openstack/dnsmasq-dns-57d769cc4f-22bgs" Nov 22 10:55:23 crc kubenswrapper[4926]: I1122 10:55:23.501057 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nnf5l\" (UniqueName: \"kubernetes.io/projected/62b8478a-cfa9-4952-a402-bc004da81057-kube-api-access-nnf5l\") pod \"dnsmasq-dns-57d769cc4f-22bgs\" (UID: \"62b8478a-cfa9-4952-a402-bc004da81057\") " pod="openstack/dnsmasq-dns-57d769cc4f-22bgs" Nov 22 10:55:23 crc kubenswrapper[4926]: I1122 10:55:23.501231 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/62b8478a-cfa9-4952-a402-bc004da81057-dns-svc\") pod \"dnsmasq-dns-57d769cc4f-22bgs\" (UID: \"62b8478a-cfa9-4952-a402-bc004da81057\") " pod="openstack/dnsmasq-dns-57d769cc4f-22bgs" Nov 22 10:55:23 crc kubenswrapper[4926]: I1122 10:55:23.602552 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nnf5l\" (UniqueName: \"kubernetes.io/projected/62b8478a-cfa9-4952-a402-bc004da81057-kube-api-access-nnf5l\") pod \"dnsmasq-dns-57d769cc4f-22bgs\" (UID: \"62b8478a-cfa9-4952-a402-bc004da81057\") " pod="openstack/dnsmasq-dns-57d769cc4f-22bgs" Nov 22 10:55:23 crc kubenswrapper[4926]: I1122 10:55:23.602629 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/62b8478a-cfa9-4952-a402-bc004da81057-dns-svc\") pod \"dnsmasq-dns-57d769cc4f-22bgs\" (UID: \"62b8478a-cfa9-4952-a402-bc004da81057\") " pod="openstack/dnsmasq-dns-57d769cc4f-22bgs" Nov 22 10:55:23 crc kubenswrapper[4926]: I1122 10:55:23.602719 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/62b8478a-cfa9-4952-a402-bc004da81057-config\") pod \"dnsmasq-dns-57d769cc4f-22bgs\" (UID: \"62b8478a-cfa9-4952-a402-bc004da81057\") " pod="openstack/dnsmasq-dns-57d769cc4f-22bgs" Nov 22 10:55:23 crc kubenswrapper[4926]: I1122 10:55:23.603541 4926 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/62b8478a-cfa9-4952-a402-bc004da81057-dns-svc\") pod \"dnsmasq-dns-57d769cc4f-22bgs\" (UID: \"62b8478a-cfa9-4952-a402-bc004da81057\") " pod="openstack/dnsmasq-dns-57d769cc4f-22bgs" Nov 22 10:55:23 crc kubenswrapper[4926]: I1122 10:55:23.603621 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/62b8478a-cfa9-4952-a402-bc004da81057-config\") pod \"dnsmasq-dns-57d769cc4f-22bgs\" (UID: \"62b8478a-cfa9-4952-a402-bc004da81057\") " pod="openstack/dnsmasq-dns-57d769cc4f-22bgs" Nov 22 10:55:23 crc kubenswrapper[4926]: I1122 10:55:23.629343 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nnf5l\" (UniqueName: \"kubernetes.io/projected/62b8478a-cfa9-4952-a402-bc004da81057-kube-api-access-nnf5l\") pod \"dnsmasq-dns-57d769cc4f-22bgs\" (UID: \"62b8478a-cfa9-4952-a402-bc004da81057\") " pod="openstack/dnsmasq-dns-57d769cc4f-22bgs" Nov 22 10:55:23 crc kubenswrapper[4926]: I1122 10:55:23.689688 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-57d769cc4f-22bgs" Nov 22 10:55:24 crc kubenswrapper[4926]: I1122 10:55:24.221836 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 22 10:55:24 crc kubenswrapper[4926]: I1122 10:55:24.223635 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Nov 22 10:55:24 crc kubenswrapper[4926]: I1122 10:55:24.225489 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-config-data" Nov 22 10:55:24 crc kubenswrapper[4926]: I1122 10:55:24.225607 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-server-dockercfg-qd6nx" Nov 22 10:55:24 crc kubenswrapper[4926]: I1122 10:55:24.225452 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-server-conf" Nov 22 10:55:24 crc kubenswrapper[4926]: I1122 10:55:24.226005 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-erlang-cookie" Nov 22 10:55:24 crc kubenswrapper[4926]: I1122 10:55:24.226201 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-cell1-svc" Nov 22 10:55:24 crc kubenswrapper[4926]: I1122 10:55:24.227440 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-plugins-conf" Nov 22 10:55:24 crc kubenswrapper[4926]: I1122 10:55:24.227475 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-default-user" Nov 22 10:55:24 crc kubenswrapper[4926]: I1122 10:55:24.238169 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 22 10:55:24 crc kubenswrapper[4926]: I1122 10:55:24.312271 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/12c9d69e-19d9-4c70-a080-c3d3fe0c33ce-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"12c9d69e-19d9-4c70-a080-c3d3fe0c33ce\") " pod="openstack/rabbitmq-cell1-server-0" Nov 22 10:55:24 crc kubenswrapper[4926]: I1122 10:55:24.312334 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod 
\"rabbitmq-cell1-server-0\" (UID: \"12c9d69e-19d9-4c70-a080-c3d3fe0c33ce\") " pod="openstack/rabbitmq-cell1-server-0" Nov 22 10:55:24 crc kubenswrapper[4926]: I1122 10:55:24.312367 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/12c9d69e-19d9-4c70-a080-c3d3fe0c33ce-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"12c9d69e-19d9-4c70-a080-c3d3fe0c33ce\") " pod="openstack/rabbitmq-cell1-server-0" Nov 22 10:55:24 crc kubenswrapper[4926]: I1122 10:55:24.312403 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/12c9d69e-19d9-4c70-a080-c3d3fe0c33ce-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"12c9d69e-19d9-4c70-a080-c3d3fe0c33ce\") " pod="openstack/rabbitmq-cell1-server-0" Nov 22 10:55:24 crc kubenswrapper[4926]: I1122 10:55:24.312441 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/12c9d69e-19d9-4c70-a080-c3d3fe0c33ce-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"12c9d69e-19d9-4c70-a080-c3d3fe0c33ce\") " pod="openstack/rabbitmq-cell1-server-0" Nov 22 10:55:24 crc kubenswrapper[4926]: I1122 10:55:24.312529 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-s476h\" (UniqueName: \"kubernetes.io/projected/12c9d69e-19d9-4c70-a080-c3d3fe0c33ce-kube-api-access-s476h\") pod \"rabbitmq-cell1-server-0\" (UID: \"12c9d69e-19d9-4c70-a080-c3d3fe0c33ce\") " pod="openstack/rabbitmq-cell1-server-0" Nov 22 10:55:24 crc kubenswrapper[4926]: I1122 10:55:24.312624 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/12c9d69e-19d9-4c70-a080-c3d3fe0c33ce-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"12c9d69e-19d9-4c70-a080-c3d3fe0c33ce\") " pod="openstack/rabbitmq-cell1-server-0" Nov 22 10:55:24 crc kubenswrapper[4926]: I1122 10:55:24.312734 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/12c9d69e-19d9-4c70-a080-c3d3fe0c33ce-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"12c9d69e-19d9-4c70-a080-c3d3fe0c33ce\") " pod="openstack/rabbitmq-cell1-server-0" Nov 22 10:55:24 crc kubenswrapper[4926]: I1122 10:55:24.312812 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/12c9d69e-19d9-4c70-a080-c3d3fe0c33ce-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"12c9d69e-19d9-4c70-a080-c3d3fe0c33ce\") " pod="openstack/rabbitmq-cell1-server-0" Nov 22 10:55:24 crc kubenswrapper[4926]: I1122 10:55:24.312845 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/12c9d69e-19d9-4c70-a080-c3d3fe0c33ce-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"12c9d69e-19d9-4c70-a080-c3d3fe0c33ce\") " pod="openstack/rabbitmq-cell1-server-0" Nov 22 10:55:24 crc kubenswrapper[4926]: I1122 10:55:24.312879 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: 
\"kubernetes.io/secret/12c9d69e-19d9-4c70-a080-c3d3fe0c33ce-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"12c9d69e-19d9-4c70-a080-c3d3fe0c33ce\") " pod="openstack/rabbitmq-cell1-server-0" Nov 22 10:55:24 crc kubenswrapper[4926]: I1122 10:55:24.413998 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/12c9d69e-19d9-4c70-a080-c3d3fe0c33ce-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"12c9d69e-19d9-4c70-a080-c3d3fe0c33ce\") " pod="openstack/rabbitmq-cell1-server-0" Nov 22 10:55:24 crc kubenswrapper[4926]: I1122 10:55:24.414291 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/12c9d69e-19d9-4c70-a080-c3d3fe0c33ce-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"12c9d69e-19d9-4c70-a080-c3d3fe0c33ce\") " pod="openstack/rabbitmq-cell1-server-0" Nov 22 10:55:24 crc kubenswrapper[4926]: I1122 10:55:24.414318 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/12c9d69e-19d9-4c70-a080-c3d3fe0c33ce-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"12c9d69e-19d9-4c70-a080-c3d3fe0c33ce\") " pod="openstack/rabbitmq-cell1-server-0" Nov 22 10:55:24 crc kubenswrapper[4926]: I1122 10:55:24.414360 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/12c9d69e-19d9-4c70-a080-c3d3fe0c33ce-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"12c9d69e-19d9-4c70-a080-c3d3fe0c33ce\") " pod="openstack/rabbitmq-cell1-server-0" Nov 22 10:55:24 crc kubenswrapper[4926]: I1122 10:55:24.414389 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/12c9d69e-19d9-4c70-a080-c3d3fe0c33ce-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"12c9d69e-19d9-4c70-a080-c3d3fe0c33ce\") " pod="openstack/rabbitmq-cell1-server-0" Nov 22 10:55:24 crc kubenswrapper[4926]: I1122 10:55:24.414451 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"12c9d69e-19d9-4c70-a080-c3d3fe0c33ce\") " pod="openstack/rabbitmq-cell1-server-0" Nov 22 10:55:24 crc kubenswrapper[4926]: I1122 10:55:24.414477 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/12c9d69e-19d9-4c70-a080-c3d3fe0c33ce-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"12c9d69e-19d9-4c70-a080-c3d3fe0c33ce\") " pod="openstack/rabbitmq-cell1-server-0" Nov 22 10:55:24 crc kubenswrapper[4926]: I1122 10:55:24.414548 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/12c9d69e-19d9-4c70-a080-c3d3fe0c33ce-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"12c9d69e-19d9-4c70-a080-c3d3fe0c33ce\") " pod="openstack/rabbitmq-cell1-server-0" Nov 22 10:55:24 crc kubenswrapper[4926]: I1122 10:55:24.414612 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/12c9d69e-19d9-4c70-a080-c3d3fe0c33ce-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"12c9d69e-19d9-4c70-a080-c3d3fe0c33ce\") " 
pod="openstack/rabbitmq-cell1-server-0" Nov 22 10:55:24 crc kubenswrapper[4926]: I1122 10:55:24.414656 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s476h\" (UniqueName: \"kubernetes.io/projected/12c9d69e-19d9-4c70-a080-c3d3fe0c33ce-kube-api-access-s476h\") pod \"rabbitmq-cell1-server-0\" (UID: \"12c9d69e-19d9-4c70-a080-c3d3fe0c33ce\") " pod="openstack/rabbitmq-cell1-server-0" Nov 22 10:55:24 crc kubenswrapper[4926]: I1122 10:55:24.414688 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/12c9d69e-19d9-4c70-a080-c3d3fe0c33ce-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"12c9d69e-19d9-4c70-a080-c3d3fe0c33ce\") " pod="openstack/rabbitmq-cell1-server-0" Nov 22 10:55:24 crc kubenswrapper[4926]: I1122 10:55:24.414792 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/12c9d69e-19d9-4c70-a080-c3d3fe0c33ce-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"12c9d69e-19d9-4c70-a080-c3d3fe0c33ce\") " pod="openstack/rabbitmq-cell1-server-0" Nov 22 10:55:24 crc kubenswrapper[4926]: I1122 10:55:24.415099 4926 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"12c9d69e-19d9-4c70-a080-c3d3fe0c33ce\") device mount path \"/mnt/openstack/pv11\"" pod="openstack/rabbitmq-cell1-server-0" Nov 22 10:55:24 crc kubenswrapper[4926]: I1122 10:55:24.415443 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/12c9d69e-19d9-4c70-a080-c3d3fe0c33ce-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"12c9d69e-19d9-4c70-a080-c3d3fe0c33ce\") " pod="openstack/rabbitmq-cell1-server-0" Nov 22 10:55:24 crc kubenswrapper[4926]: I1122 10:55:24.415693 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/12c9d69e-19d9-4c70-a080-c3d3fe0c33ce-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"12c9d69e-19d9-4c70-a080-c3d3fe0c33ce\") " pod="openstack/rabbitmq-cell1-server-0" Nov 22 10:55:24 crc kubenswrapper[4926]: I1122 10:55:24.415963 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/12c9d69e-19d9-4c70-a080-c3d3fe0c33ce-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"12c9d69e-19d9-4c70-a080-c3d3fe0c33ce\") " pod="openstack/rabbitmq-cell1-server-0" Nov 22 10:55:24 crc kubenswrapper[4926]: I1122 10:55:24.416474 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/12c9d69e-19d9-4c70-a080-c3d3fe0c33ce-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"12c9d69e-19d9-4c70-a080-c3d3fe0c33ce\") " pod="openstack/rabbitmq-cell1-server-0" Nov 22 10:55:24 crc kubenswrapper[4926]: I1122 10:55:24.419134 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/12c9d69e-19d9-4c70-a080-c3d3fe0c33ce-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"12c9d69e-19d9-4c70-a080-c3d3fe0c33ce\") " pod="openstack/rabbitmq-cell1-server-0" Nov 22 10:55:24 crc kubenswrapper[4926]: I1122 10:55:24.419574 4926 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/12c9d69e-19d9-4c70-a080-c3d3fe0c33ce-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"12c9d69e-19d9-4c70-a080-c3d3fe0c33ce\") " pod="openstack/rabbitmq-cell1-server-0" Nov 22 10:55:24 crc kubenswrapper[4926]: I1122 10:55:24.431587 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s476h\" (UniqueName: \"kubernetes.io/projected/12c9d69e-19d9-4c70-a080-c3d3fe0c33ce-kube-api-access-s476h\") pod \"rabbitmq-cell1-server-0\" (UID: \"12c9d69e-19d9-4c70-a080-c3d3fe0c33ce\") " pod="openstack/rabbitmq-cell1-server-0" Nov 22 10:55:24 crc kubenswrapper[4926]: I1122 10:55:24.432683 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/12c9d69e-19d9-4c70-a080-c3d3fe0c33ce-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"12c9d69e-19d9-4c70-a080-c3d3fe0c33ce\") " pod="openstack/rabbitmq-cell1-server-0" Nov 22 10:55:24 crc kubenswrapper[4926]: I1122 10:55:24.433606 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/12c9d69e-19d9-4c70-a080-c3d3fe0c33ce-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"12c9d69e-19d9-4c70-a080-c3d3fe0c33ce\") " pod="openstack/rabbitmq-cell1-server-0" Nov 22 10:55:24 crc kubenswrapper[4926]: I1122 10:55:24.441610 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"12c9d69e-19d9-4c70-a080-c3d3fe0c33ce\") " pod="openstack/rabbitmq-cell1-server-0" Nov 22 10:55:24 crc kubenswrapper[4926]: I1122 10:55:24.479151 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-server-0"] Nov 22 10:55:24 crc kubenswrapper[4926]: I1122 10:55:24.480812 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-server-0" Nov 22 10:55:24 crc kubenswrapper[4926]: I1122 10:55:24.482921 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-config-data" Nov 22 10:55:24 crc kubenswrapper[4926]: I1122 10:55:24.483029 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-erlang-cookie" Nov 22 10:55:24 crc kubenswrapper[4926]: I1122 10:55:24.483212 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-server-dockercfg-r895p" Nov 22 10:55:24 crc kubenswrapper[4926]: I1122 10:55:24.483213 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-default-user" Nov 22 10:55:24 crc kubenswrapper[4926]: I1122 10:55:24.483709 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-svc" Nov 22 10:55:24 crc kubenswrapper[4926]: I1122 10:55:24.483977 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-server-conf" Nov 22 10:55:24 crc kubenswrapper[4926]: I1122 10:55:24.484022 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-plugins-conf" Nov 22 10:55:24 crc kubenswrapper[4926]: I1122 10:55:24.498588 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Nov 22 10:55:24 crc kubenswrapper[4926]: I1122 10:55:24.515824 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/cb1c4cdf-86c1-4770-b406-87cb1ea92552-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"cb1c4cdf-86c1-4770-b406-87cb1ea92552\") " pod="openstack/rabbitmq-server-0" Nov 22 10:55:24 crc kubenswrapper[4926]: I1122 10:55:24.515920 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/cb1c4cdf-86c1-4770-b406-87cb1ea92552-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"cb1c4cdf-86c1-4770-b406-87cb1ea92552\") " pod="openstack/rabbitmq-server-0" Nov 22 10:55:24 crc kubenswrapper[4926]: I1122 10:55:24.516042 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/cb1c4cdf-86c1-4770-b406-87cb1ea92552-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"cb1c4cdf-86c1-4770-b406-87cb1ea92552\") " pod="openstack/rabbitmq-server-0" Nov 22 10:55:24 crc kubenswrapper[4926]: I1122 10:55:24.516081 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"rabbitmq-server-0\" (UID: \"cb1c4cdf-86c1-4770-b406-87cb1ea92552\") " pod="openstack/rabbitmq-server-0" Nov 22 10:55:24 crc kubenswrapper[4926]: I1122 10:55:24.516157 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/cb1c4cdf-86c1-4770-b406-87cb1ea92552-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"cb1c4cdf-86c1-4770-b406-87cb1ea92552\") " pod="openstack/rabbitmq-server-0" Nov 22 10:55:24 crc kubenswrapper[4926]: I1122 10:55:24.516253 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: 
\"kubernetes.io/configmap/cb1c4cdf-86c1-4770-b406-87cb1ea92552-config-data\") pod \"rabbitmq-server-0\" (UID: \"cb1c4cdf-86c1-4770-b406-87cb1ea92552\") " pod="openstack/rabbitmq-server-0" Nov 22 10:55:24 crc kubenswrapper[4926]: I1122 10:55:24.516297 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8n82q\" (UniqueName: \"kubernetes.io/projected/cb1c4cdf-86c1-4770-b406-87cb1ea92552-kube-api-access-8n82q\") pod \"rabbitmq-server-0\" (UID: \"cb1c4cdf-86c1-4770-b406-87cb1ea92552\") " pod="openstack/rabbitmq-server-0" Nov 22 10:55:24 crc kubenswrapper[4926]: I1122 10:55:24.516448 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/cb1c4cdf-86c1-4770-b406-87cb1ea92552-pod-info\") pod \"rabbitmq-server-0\" (UID: \"cb1c4cdf-86c1-4770-b406-87cb1ea92552\") " pod="openstack/rabbitmq-server-0" Nov 22 10:55:24 crc kubenswrapper[4926]: I1122 10:55:24.516556 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/cb1c4cdf-86c1-4770-b406-87cb1ea92552-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"cb1c4cdf-86c1-4770-b406-87cb1ea92552\") " pod="openstack/rabbitmq-server-0" Nov 22 10:55:24 crc kubenswrapper[4926]: I1122 10:55:24.516613 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/cb1c4cdf-86c1-4770-b406-87cb1ea92552-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"cb1c4cdf-86c1-4770-b406-87cb1ea92552\") " pod="openstack/rabbitmq-server-0" Nov 22 10:55:24 crc kubenswrapper[4926]: I1122 10:55:24.516635 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/cb1c4cdf-86c1-4770-b406-87cb1ea92552-server-conf\") pod \"rabbitmq-server-0\" (UID: \"cb1c4cdf-86c1-4770-b406-87cb1ea92552\") " pod="openstack/rabbitmq-server-0" Nov 22 10:55:24 crc kubenswrapper[4926]: I1122 10:55:24.592875 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Nov 22 10:55:24 crc kubenswrapper[4926]: I1122 10:55:24.617701 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/cb1c4cdf-86c1-4770-b406-87cb1ea92552-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"cb1c4cdf-86c1-4770-b406-87cb1ea92552\") " pod="openstack/rabbitmq-server-0" Nov 22 10:55:24 crc kubenswrapper[4926]: I1122 10:55:24.617816 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/cb1c4cdf-86c1-4770-b406-87cb1ea92552-server-conf\") pod \"rabbitmq-server-0\" (UID: \"cb1c4cdf-86c1-4770-b406-87cb1ea92552\") " pod="openstack/rabbitmq-server-0" Nov 22 10:55:24 crc kubenswrapper[4926]: I1122 10:55:24.617835 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/cb1c4cdf-86c1-4770-b406-87cb1ea92552-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"cb1c4cdf-86c1-4770-b406-87cb1ea92552\") " pod="openstack/rabbitmq-server-0" Nov 22 10:55:24 crc kubenswrapper[4926]: I1122 10:55:24.617853 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/cb1c4cdf-86c1-4770-b406-87cb1ea92552-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"cb1c4cdf-86c1-4770-b406-87cb1ea92552\") " pod="openstack/rabbitmq-server-0" Nov 22 10:55:24 crc kubenswrapper[4926]: I1122 10:55:24.617878 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/cb1c4cdf-86c1-4770-b406-87cb1ea92552-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"cb1c4cdf-86c1-4770-b406-87cb1ea92552\") " pod="openstack/rabbitmq-server-0" Nov 22 10:55:24 crc kubenswrapper[4926]: I1122 10:55:24.617914 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/cb1c4cdf-86c1-4770-b406-87cb1ea92552-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"cb1c4cdf-86c1-4770-b406-87cb1ea92552\") " pod="openstack/rabbitmq-server-0" Nov 22 10:55:24 crc kubenswrapper[4926]: I1122 10:55:24.617942 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"rabbitmq-server-0\" (UID: \"cb1c4cdf-86c1-4770-b406-87cb1ea92552\") " pod="openstack/rabbitmq-server-0" Nov 22 10:55:24 crc kubenswrapper[4926]: I1122 10:55:24.617963 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/cb1c4cdf-86c1-4770-b406-87cb1ea92552-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"cb1c4cdf-86c1-4770-b406-87cb1ea92552\") " pod="openstack/rabbitmq-server-0" Nov 22 10:55:24 crc kubenswrapper[4926]: I1122 10:55:24.617994 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/cb1c4cdf-86c1-4770-b406-87cb1ea92552-config-data\") pod \"rabbitmq-server-0\" (UID: \"cb1c4cdf-86c1-4770-b406-87cb1ea92552\") " pod="openstack/rabbitmq-server-0" Nov 22 10:55:24 crc kubenswrapper[4926]: I1122 10:55:24.618016 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8n82q\" (UniqueName: 
\"kubernetes.io/projected/cb1c4cdf-86c1-4770-b406-87cb1ea92552-kube-api-access-8n82q\") pod \"rabbitmq-server-0\" (UID: \"cb1c4cdf-86c1-4770-b406-87cb1ea92552\") " pod="openstack/rabbitmq-server-0" Nov 22 10:55:24 crc kubenswrapper[4926]: I1122 10:55:24.618050 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/cb1c4cdf-86c1-4770-b406-87cb1ea92552-pod-info\") pod \"rabbitmq-server-0\" (UID: \"cb1c4cdf-86c1-4770-b406-87cb1ea92552\") " pod="openstack/rabbitmq-server-0" Nov 22 10:55:24 crc kubenswrapper[4926]: I1122 10:55:24.618296 4926 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"rabbitmq-server-0\" (UID: \"cb1c4cdf-86c1-4770-b406-87cb1ea92552\") device mount path \"/mnt/openstack/pv02\"" pod="openstack/rabbitmq-server-0" Nov 22 10:55:24 crc kubenswrapper[4926]: I1122 10:55:24.618577 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/cb1c4cdf-86c1-4770-b406-87cb1ea92552-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"cb1c4cdf-86c1-4770-b406-87cb1ea92552\") " pod="openstack/rabbitmq-server-0" Nov 22 10:55:24 crc kubenswrapper[4926]: I1122 10:55:24.619035 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/cb1c4cdf-86c1-4770-b406-87cb1ea92552-config-data\") pod \"rabbitmq-server-0\" (UID: \"cb1c4cdf-86c1-4770-b406-87cb1ea92552\") " pod="openstack/rabbitmq-server-0" Nov 22 10:55:24 crc kubenswrapper[4926]: I1122 10:55:24.619277 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/cb1c4cdf-86c1-4770-b406-87cb1ea92552-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"cb1c4cdf-86c1-4770-b406-87cb1ea92552\") " pod="openstack/rabbitmq-server-0" Nov 22 10:55:24 crc kubenswrapper[4926]: I1122 10:55:24.619802 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/cb1c4cdf-86c1-4770-b406-87cb1ea92552-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"cb1c4cdf-86c1-4770-b406-87cb1ea92552\") " pod="openstack/rabbitmq-server-0" Nov 22 10:55:24 crc kubenswrapper[4926]: I1122 10:55:24.619973 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/cb1c4cdf-86c1-4770-b406-87cb1ea92552-server-conf\") pod \"rabbitmq-server-0\" (UID: \"cb1c4cdf-86c1-4770-b406-87cb1ea92552\") " pod="openstack/rabbitmq-server-0" Nov 22 10:55:24 crc kubenswrapper[4926]: I1122 10:55:24.622377 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/cb1c4cdf-86c1-4770-b406-87cb1ea92552-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"cb1c4cdf-86c1-4770-b406-87cb1ea92552\") " pod="openstack/rabbitmq-server-0" Nov 22 10:55:24 crc kubenswrapper[4926]: I1122 10:55:24.623230 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/cb1c4cdf-86c1-4770-b406-87cb1ea92552-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"cb1c4cdf-86c1-4770-b406-87cb1ea92552\") " pod="openstack/rabbitmq-server-0" Nov 22 10:55:24 crc kubenswrapper[4926]: I1122 10:55:24.623677 4926 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/cb1c4cdf-86c1-4770-b406-87cb1ea92552-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"cb1c4cdf-86c1-4770-b406-87cb1ea92552\") " pod="openstack/rabbitmq-server-0" Nov 22 10:55:24 crc kubenswrapper[4926]: I1122 10:55:24.635362 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8n82q\" (UniqueName: \"kubernetes.io/projected/cb1c4cdf-86c1-4770-b406-87cb1ea92552-kube-api-access-8n82q\") pod \"rabbitmq-server-0\" (UID: \"cb1c4cdf-86c1-4770-b406-87cb1ea92552\") " pod="openstack/rabbitmq-server-0" Nov 22 10:55:24 crc kubenswrapper[4926]: I1122 10:55:24.637663 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"rabbitmq-server-0\" (UID: \"cb1c4cdf-86c1-4770-b406-87cb1ea92552\") " pod="openstack/rabbitmq-server-0" Nov 22 10:55:24 crc kubenswrapper[4926]: I1122 10:55:24.647866 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/cb1c4cdf-86c1-4770-b406-87cb1ea92552-pod-info\") pod \"rabbitmq-server-0\" (UID: \"cb1c4cdf-86c1-4770-b406-87cb1ea92552\") " pod="openstack/rabbitmq-server-0" Nov 22 10:55:24 crc kubenswrapper[4926]: I1122 10:55:24.804502 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0" Nov 22 10:55:26 crc kubenswrapper[4926]: I1122 10:55:26.097376 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstack-galera-0"] Nov 22 10:55:26 crc kubenswrapper[4926]: I1122 10:55:26.099134 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstack-galera-0" Nov 22 10:55:26 crc kubenswrapper[4926]: I1122 10:55:26.106285 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-scripts" Nov 22 10:55:26 crc kubenswrapper[4926]: I1122 10:55:26.106753 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-config-data" Nov 22 10:55:26 crc kubenswrapper[4926]: I1122 10:55:26.106927 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"galera-openstack-dockercfg-9nmx9" Nov 22 10:55:26 crc kubenswrapper[4926]: I1122 10:55:26.107660 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-galera-openstack-svc" Nov 22 10:55:26 crc kubenswrapper[4926]: I1122 10:55:26.112453 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-galera-0"] Nov 22 10:55:26 crc kubenswrapper[4926]: I1122 10:55:26.113343 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"combined-ca-bundle" Nov 22 10:55:26 crc kubenswrapper[4926]: I1122 10:55:26.146284 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/94fdd08c-2339-4d12-90bf-fbd407185f34-config-data-default\") pod \"openstack-galera-0\" (UID: \"94fdd08c-2339-4d12-90bf-fbd407185f34\") " pod="openstack/openstack-galera-0" Nov 22 10:55:26 crc kubenswrapper[4926]: I1122 10:55:26.146367 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/94fdd08c-2339-4d12-90bf-fbd407185f34-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"94fdd08c-2339-4d12-90bf-fbd407185f34\") " pod="openstack/openstack-galera-0" Nov 22 10:55:26 crc kubenswrapper[4926]: I1122 10:55:26.146399 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/94fdd08c-2339-4d12-90bf-fbd407185f34-config-data-generated\") pod \"openstack-galera-0\" (UID: \"94fdd08c-2339-4d12-90bf-fbd407185f34\") " pod="openstack/openstack-galera-0" Nov 22 10:55:26 crc kubenswrapper[4926]: I1122 10:55:26.146425 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"openstack-galera-0\" (UID: \"94fdd08c-2339-4d12-90bf-fbd407185f34\") " pod="openstack/openstack-galera-0" Nov 22 10:55:26 crc kubenswrapper[4926]: I1122 10:55:26.146455 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/94fdd08c-2339-4d12-90bf-fbd407185f34-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"94fdd08c-2339-4d12-90bf-fbd407185f34\") " pod="openstack/openstack-galera-0" Nov 22 10:55:26 crc kubenswrapper[4926]: I1122 10:55:26.146487 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xvjm4\" (UniqueName: \"kubernetes.io/projected/94fdd08c-2339-4d12-90bf-fbd407185f34-kube-api-access-xvjm4\") pod \"openstack-galera-0\" (UID: \"94fdd08c-2339-4d12-90bf-fbd407185f34\") " pod="openstack/openstack-galera-0" Nov 22 10:55:26 crc kubenswrapper[4926]: I1122 10:55:26.146552 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume 
started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/94fdd08c-2339-4d12-90bf-fbd407185f34-kolla-config\") pod \"openstack-galera-0\" (UID: \"94fdd08c-2339-4d12-90bf-fbd407185f34\") " pod="openstack/openstack-galera-0" Nov 22 10:55:26 crc kubenswrapper[4926]: I1122 10:55:26.146636 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/94fdd08c-2339-4d12-90bf-fbd407185f34-operator-scripts\") pod \"openstack-galera-0\" (UID: \"94fdd08c-2339-4d12-90bf-fbd407185f34\") " pod="openstack/openstack-galera-0" Nov 22 10:55:26 crc kubenswrapper[4926]: I1122 10:55:26.247636 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/94fdd08c-2339-4d12-90bf-fbd407185f34-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"94fdd08c-2339-4d12-90bf-fbd407185f34\") " pod="openstack/openstack-galera-0" Nov 22 10:55:26 crc kubenswrapper[4926]: I1122 10:55:26.247686 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/94fdd08c-2339-4d12-90bf-fbd407185f34-config-data-generated\") pod \"openstack-galera-0\" (UID: \"94fdd08c-2339-4d12-90bf-fbd407185f34\") " pod="openstack/openstack-galera-0" Nov 22 10:55:26 crc kubenswrapper[4926]: I1122 10:55:26.247709 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"openstack-galera-0\" (UID: \"94fdd08c-2339-4d12-90bf-fbd407185f34\") " pod="openstack/openstack-galera-0" Nov 22 10:55:26 crc kubenswrapper[4926]: I1122 10:55:26.247735 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/94fdd08c-2339-4d12-90bf-fbd407185f34-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"94fdd08c-2339-4d12-90bf-fbd407185f34\") " pod="openstack/openstack-galera-0" Nov 22 10:55:26 crc kubenswrapper[4926]: I1122 10:55:26.247751 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xvjm4\" (UniqueName: \"kubernetes.io/projected/94fdd08c-2339-4d12-90bf-fbd407185f34-kube-api-access-xvjm4\") pod \"openstack-galera-0\" (UID: \"94fdd08c-2339-4d12-90bf-fbd407185f34\") " pod="openstack/openstack-galera-0" Nov 22 10:55:26 crc kubenswrapper[4926]: I1122 10:55:26.247788 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/94fdd08c-2339-4d12-90bf-fbd407185f34-kolla-config\") pod \"openstack-galera-0\" (UID: \"94fdd08c-2339-4d12-90bf-fbd407185f34\") " pod="openstack/openstack-galera-0" Nov 22 10:55:26 crc kubenswrapper[4926]: I1122 10:55:26.247830 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/94fdd08c-2339-4d12-90bf-fbd407185f34-operator-scripts\") pod \"openstack-galera-0\" (UID: \"94fdd08c-2339-4d12-90bf-fbd407185f34\") " pod="openstack/openstack-galera-0" Nov 22 10:55:26 crc kubenswrapper[4926]: I1122 10:55:26.247853 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/94fdd08c-2339-4d12-90bf-fbd407185f34-config-data-default\") pod \"openstack-galera-0\" (UID: 
\"94fdd08c-2339-4d12-90bf-fbd407185f34\") " pod="openstack/openstack-galera-0" Nov 22 10:55:26 crc kubenswrapper[4926]: I1122 10:55:26.248198 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/94fdd08c-2339-4d12-90bf-fbd407185f34-config-data-generated\") pod \"openstack-galera-0\" (UID: \"94fdd08c-2339-4d12-90bf-fbd407185f34\") " pod="openstack/openstack-galera-0" Nov 22 10:55:26 crc kubenswrapper[4926]: I1122 10:55:26.248310 4926 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"openstack-galera-0\" (UID: \"94fdd08c-2339-4d12-90bf-fbd407185f34\") device mount path \"/mnt/openstack/pv07\"" pod="openstack/openstack-galera-0" Nov 22 10:55:26 crc kubenswrapper[4926]: I1122 10:55:26.249029 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/94fdd08c-2339-4d12-90bf-fbd407185f34-kolla-config\") pod \"openstack-galera-0\" (UID: \"94fdd08c-2339-4d12-90bf-fbd407185f34\") " pod="openstack/openstack-galera-0" Nov 22 10:55:26 crc kubenswrapper[4926]: I1122 10:55:26.249089 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/94fdd08c-2339-4d12-90bf-fbd407185f34-config-data-default\") pod \"openstack-galera-0\" (UID: \"94fdd08c-2339-4d12-90bf-fbd407185f34\") " pod="openstack/openstack-galera-0" Nov 22 10:55:26 crc kubenswrapper[4926]: I1122 10:55:26.250499 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/94fdd08c-2339-4d12-90bf-fbd407185f34-operator-scripts\") pod \"openstack-galera-0\" (UID: \"94fdd08c-2339-4d12-90bf-fbd407185f34\") " pod="openstack/openstack-galera-0" Nov 22 10:55:26 crc kubenswrapper[4926]: I1122 10:55:26.253238 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/94fdd08c-2339-4d12-90bf-fbd407185f34-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"94fdd08c-2339-4d12-90bf-fbd407185f34\") " pod="openstack/openstack-galera-0" Nov 22 10:55:26 crc kubenswrapper[4926]: I1122 10:55:26.253474 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/94fdd08c-2339-4d12-90bf-fbd407185f34-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"94fdd08c-2339-4d12-90bf-fbd407185f34\") " pod="openstack/openstack-galera-0" Nov 22 10:55:26 crc kubenswrapper[4926]: I1122 10:55:26.266653 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xvjm4\" (UniqueName: \"kubernetes.io/projected/94fdd08c-2339-4d12-90bf-fbd407185f34-kube-api-access-xvjm4\") pod \"openstack-galera-0\" (UID: \"94fdd08c-2339-4d12-90bf-fbd407185f34\") " pod="openstack/openstack-galera-0" Nov 22 10:55:26 crc kubenswrapper[4926]: I1122 10:55:26.278929 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"openstack-galera-0\" (UID: \"94fdd08c-2339-4d12-90bf-fbd407185f34\") " pod="openstack/openstack-galera-0" Nov 22 10:55:26 crc kubenswrapper[4926]: I1122 10:55:26.422567 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstack-galera-0" Nov 22 10:55:27 crc kubenswrapper[4926]: I1122 10:55:27.438604 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstack-cell1-galera-0"] Nov 22 10:55:27 crc kubenswrapper[4926]: I1122 10:55:27.441289 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-cell1-galera-0" Nov 22 10:55:27 crc kubenswrapper[4926]: I1122 10:55:27.444181 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-galera-openstack-cell1-svc" Nov 22 10:55:27 crc kubenswrapper[4926]: I1122 10:55:27.444569 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-cell1-config-data" Nov 22 10:55:27 crc kubenswrapper[4926]: I1122 10:55:27.444948 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"galera-openstack-cell1-dockercfg-hxlrz" Nov 22 10:55:27 crc kubenswrapper[4926]: I1122 10:55:27.446168 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-cell1-scripts" Nov 22 10:55:27 crc kubenswrapper[4926]: I1122 10:55:27.461671 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-cell1-galera-0"] Nov 22 10:55:27 crc kubenswrapper[4926]: I1122 10:55:27.563432 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"openstack-cell1-galera-0\" (UID: \"01026c46-6589-4761-80f4-8bb210d71fd9\") " pod="openstack/openstack-cell1-galera-0" Nov 22 10:55:27 crc kubenswrapper[4926]: I1122 10:55:27.563529 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/01026c46-6589-4761-80f4-8bb210d71fd9-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"01026c46-6589-4761-80f4-8bb210d71fd9\") " pod="openstack/openstack-cell1-galera-0" Nov 22 10:55:27 crc kubenswrapper[4926]: I1122 10:55:27.563557 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/01026c46-6589-4761-80f4-8bb210d71fd9-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"01026c46-6589-4761-80f4-8bb210d71fd9\") " pod="openstack/openstack-cell1-galera-0" Nov 22 10:55:27 crc kubenswrapper[4926]: I1122 10:55:27.563674 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/01026c46-6589-4761-80f4-8bb210d71fd9-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: \"01026c46-6589-4761-80f4-8bb210d71fd9\") " pod="openstack/openstack-cell1-galera-0" Nov 22 10:55:27 crc kubenswrapper[4926]: I1122 10:55:27.563722 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/01026c46-6589-4761-80f4-8bb210d71fd9-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"01026c46-6589-4761-80f4-8bb210d71fd9\") " pod="openstack/openstack-cell1-galera-0" Nov 22 10:55:27 crc kubenswrapper[4926]: I1122 10:55:27.563750 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/01026c46-6589-4761-80f4-8bb210d71fd9-combined-ca-bundle\") pod 
\"openstack-cell1-galera-0\" (UID: \"01026c46-6589-4761-80f4-8bb210d71fd9\") " pod="openstack/openstack-cell1-galera-0" Nov 22 10:55:27 crc kubenswrapper[4926]: I1122 10:55:27.563803 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/01026c46-6589-4761-80f4-8bb210d71fd9-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"01026c46-6589-4761-80f4-8bb210d71fd9\") " pod="openstack/openstack-cell1-galera-0" Nov 22 10:55:27 crc kubenswrapper[4926]: I1122 10:55:27.563916 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hnjb7\" (UniqueName: \"kubernetes.io/projected/01026c46-6589-4761-80f4-8bb210d71fd9-kube-api-access-hnjb7\") pod \"openstack-cell1-galera-0\" (UID: \"01026c46-6589-4761-80f4-8bb210d71fd9\") " pod="openstack/openstack-cell1-galera-0" Nov 22 10:55:27 crc kubenswrapper[4926]: I1122 10:55:27.665220 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/01026c46-6589-4761-80f4-8bb210d71fd9-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"01026c46-6589-4761-80f4-8bb210d71fd9\") " pod="openstack/openstack-cell1-galera-0" Nov 22 10:55:27 crc kubenswrapper[4926]: I1122 10:55:27.665274 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/01026c46-6589-4761-80f4-8bb210d71fd9-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"01026c46-6589-4761-80f4-8bb210d71fd9\") " pod="openstack/openstack-cell1-galera-0" Nov 22 10:55:27 crc kubenswrapper[4926]: I1122 10:55:27.665303 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/01026c46-6589-4761-80f4-8bb210d71fd9-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: \"01026c46-6589-4761-80f4-8bb210d71fd9\") " pod="openstack/openstack-cell1-galera-0" Nov 22 10:55:27 crc kubenswrapper[4926]: I1122 10:55:27.665327 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/01026c46-6589-4761-80f4-8bb210d71fd9-combined-ca-bundle\") pod \"openstack-cell1-galera-0\" (UID: \"01026c46-6589-4761-80f4-8bb210d71fd9\") " pod="openstack/openstack-cell1-galera-0" Nov 22 10:55:27 crc kubenswrapper[4926]: I1122 10:55:27.665356 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/01026c46-6589-4761-80f4-8bb210d71fd9-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"01026c46-6589-4761-80f4-8bb210d71fd9\") " pod="openstack/openstack-cell1-galera-0" Nov 22 10:55:27 crc kubenswrapper[4926]: I1122 10:55:27.665390 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/01026c46-6589-4761-80f4-8bb210d71fd9-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"01026c46-6589-4761-80f4-8bb210d71fd9\") " pod="openstack/openstack-cell1-galera-0" Nov 22 10:55:27 crc kubenswrapper[4926]: I1122 10:55:27.665455 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hnjb7\" (UniqueName: \"kubernetes.io/projected/01026c46-6589-4761-80f4-8bb210d71fd9-kube-api-access-hnjb7\") pod 
\"openstack-cell1-galera-0\" (UID: \"01026c46-6589-4761-80f4-8bb210d71fd9\") " pod="openstack/openstack-cell1-galera-0" Nov 22 10:55:27 crc kubenswrapper[4926]: I1122 10:55:27.665525 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"openstack-cell1-galera-0\" (UID: \"01026c46-6589-4761-80f4-8bb210d71fd9\") " pod="openstack/openstack-cell1-galera-0" Nov 22 10:55:27 crc kubenswrapper[4926]: I1122 10:55:27.665828 4926 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"openstack-cell1-galera-0\" (UID: \"01026c46-6589-4761-80f4-8bb210d71fd9\") device mount path \"/mnt/openstack/pv03\"" pod="openstack/openstack-cell1-galera-0" Nov 22 10:55:27 crc kubenswrapper[4926]: I1122 10:55:27.666135 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/01026c46-6589-4761-80f4-8bb210d71fd9-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"01026c46-6589-4761-80f4-8bb210d71fd9\") " pod="openstack/openstack-cell1-galera-0" Nov 22 10:55:27 crc kubenswrapper[4926]: I1122 10:55:27.666216 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/01026c46-6589-4761-80f4-8bb210d71fd9-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"01026c46-6589-4761-80f4-8bb210d71fd9\") " pod="openstack/openstack-cell1-galera-0" Nov 22 10:55:27 crc kubenswrapper[4926]: I1122 10:55:27.666407 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/01026c46-6589-4761-80f4-8bb210d71fd9-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"01026c46-6589-4761-80f4-8bb210d71fd9\") " pod="openstack/openstack-cell1-galera-0" Nov 22 10:55:27 crc kubenswrapper[4926]: I1122 10:55:27.667350 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/01026c46-6589-4761-80f4-8bb210d71fd9-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"01026c46-6589-4761-80f4-8bb210d71fd9\") " pod="openstack/openstack-cell1-galera-0" Nov 22 10:55:27 crc kubenswrapper[4926]: I1122 10:55:27.671275 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/01026c46-6589-4761-80f4-8bb210d71fd9-combined-ca-bundle\") pod \"openstack-cell1-galera-0\" (UID: \"01026c46-6589-4761-80f4-8bb210d71fd9\") " pod="openstack/openstack-cell1-galera-0" Nov 22 10:55:27 crc kubenswrapper[4926]: I1122 10:55:27.681658 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/01026c46-6589-4761-80f4-8bb210d71fd9-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: \"01026c46-6589-4761-80f4-8bb210d71fd9\") " pod="openstack/openstack-cell1-galera-0" Nov 22 10:55:27 crc kubenswrapper[4926]: I1122 10:55:27.696969 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"openstack-cell1-galera-0\" (UID: \"01026c46-6589-4761-80f4-8bb210d71fd9\") " pod="openstack/openstack-cell1-galera-0" Nov 22 10:55:27 crc kubenswrapper[4926]: I1122 10:55:27.706469 
4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hnjb7\" (UniqueName: \"kubernetes.io/projected/01026c46-6589-4761-80f4-8bb210d71fd9-kube-api-access-hnjb7\") pod \"openstack-cell1-galera-0\" (UID: \"01026c46-6589-4761-80f4-8bb210d71fd9\") " pod="openstack/openstack-cell1-galera-0" Nov 22 10:55:27 crc kubenswrapper[4926]: I1122 10:55:27.767144 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-cell1-galera-0" Nov 22 10:55:27 crc kubenswrapper[4926]: I1122 10:55:27.873627 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/memcached-0"] Nov 22 10:55:27 crc kubenswrapper[4926]: I1122 10:55:27.874591 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/memcached-0" Nov 22 10:55:27 crc kubenswrapper[4926]: I1122 10:55:27.878025 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"memcached-config-data" Nov 22 10:55:27 crc kubenswrapper[4926]: I1122 10:55:27.878233 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-memcached-svc" Nov 22 10:55:27 crc kubenswrapper[4926]: I1122 10:55:27.878328 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"memcached-memcached-dockercfg-2n8sz" Nov 22 10:55:27 crc kubenswrapper[4926]: I1122 10:55:27.886722 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/memcached-0"] Nov 22 10:55:27 crc kubenswrapper[4926]: I1122 10:55:27.969983 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/6f000ebf-57ae-4f00-9aaf-7583a9ec4abb-config-data\") pod \"memcached-0\" (UID: \"6f000ebf-57ae-4f00-9aaf-7583a9ec4abb\") " pod="openstack/memcached-0" Nov 22 10:55:27 crc kubenswrapper[4926]: I1122 10:55:27.970044 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cww4l\" (UniqueName: \"kubernetes.io/projected/6f000ebf-57ae-4f00-9aaf-7583a9ec4abb-kube-api-access-cww4l\") pod \"memcached-0\" (UID: \"6f000ebf-57ae-4f00-9aaf-7583a9ec4abb\") " pod="openstack/memcached-0" Nov 22 10:55:27 crc kubenswrapper[4926]: I1122 10:55:27.970323 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/6f000ebf-57ae-4f00-9aaf-7583a9ec4abb-memcached-tls-certs\") pod \"memcached-0\" (UID: \"6f000ebf-57ae-4f00-9aaf-7583a9ec4abb\") " pod="openstack/memcached-0" Nov 22 10:55:27 crc kubenswrapper[4926]: I1122 10:55:27.970364 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6f000ebf-57ae-4f00-9aaf-7583a9ec4abb-combined-ca-bundle\") pod \"memcached-0\" (UID: \"6f000ebf-57ae-4f00-9aaf-7583a9ec4abb\") " pod="openstack/memcached-0" Nov 22 10:55:27 crc kubenswrapper[4926]: I1122 10:55:27.970399 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/6f000ebf-57ae-4f00-9aaf-7583a9ec4abb-kolla-config\") pod \"memcached-0\" (UID: \"6f000ebf-57ae-4f00-9aaf-7583a9ec4abb\") " pod="openstack/memcached-0" Nov 22 10:55:28 crc kubenswrapper[4926]: I1122 10:55:28.072531 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memcached-tls-certs\" (UniqueName: 
\"kubernetes.io/secret/6f000ebf-57ae-4f00-9aaf-7583a9ec4abb-memcached-tls-certs\") pod \"memcached-0\" (UID: \"6f000ebf-57ae-4f00-9aaf-7583a9ec4abb\") " pod="openstack/memcached-0" Nov 22 10:55:28 crc kubenswrapper[4926]: I1122 10:55:28.072902 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6f000ebf-57ae-4f00-9aaf-7583a9ec4abb-combined-ca-bundle\") pod \"memcached-0\" (UID: \"6f000ebf-57ae-4f00-9aaf-7583a9ec4abb\") " pod="openstack/memcached-0" Nov 22 10:55:28 crc kubenswrapper[4926]: I1122 10:55:28.072936 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/6f000ebf-57ae-4f00-9aaf-7583a9ec4abb-kolla-config\") pod \"memcached-0\" (UID: \"6f000ebf-57ae-4f00-9aaf-7583a9ec4abb\") " pod="openstack/memcached-0" Nov 22 10:55:28 crc kubenswrapper[4926]: I1122 10:55:28.072980 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/6f000ebf-57ae-4f00-9aaf-7583a9ec4abb-config-data\") pod \"memcached-0\" (UID: \"6f000ebf-57ae-4f00-9aaf-7583a9ec4abb\") " pod="openstack/memcached-0" Nov 22 10:55:28 crc kubenswrapper[4926]: I1122 10:55:28.073015 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cww4l\" (UniqueName: \"kubernetes.io/projected/6f000ebf-57ae-4f00-9aaf-7583a9ec4abb-kube-api-access-cww4l\") pod \"memcached-0\" (UID: \"6f000ebf-57ae-4f00-9aaf-7583a9ec4abb\") " pod="openstack/memcached-0" Nov 22 10:55:28 crc kubenswrapper[4926]: I1122 10:55:28.074775 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/6f000ebf-57ae-4f00-9aaf-7583a9ec4abb-kolla-config\") pod \"memcached-0\" (UID: \"6f000ebf-57ae-4f00-9aaf-7583a9ec4abb\") " pod="openstack/memcached-0" Nov 22 10:55:28 crc kubenswrapper[4926]: I1122 10:55:28.075378 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/6f000ebf-57ae-4f00-9aaf-7583a9ec4abb-config-data\") pod \"memcached-0\" (UID: \"6f000ebf-57ae-4f00-9aaf-7583a9ec4abb\") " pod="openstack/memcached-0" Nov 22 10:55:28 crc kubenswrapper[4926]: I1122 10:55:28.077169 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/6f000ebf-57ae-4f00-9aaf-7583a9ec4abb-memcached-tls-certs\") pod \"memcached-0\" (UID: \"6f000ebf-57ae-4f00-9aaf-7583a9ec4abb\") " pod="openstack/memcached-0" Nov 22 10:55:28 crc kubenswrapper[4926]: I1122 10:55:28.078088 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6f000ebf-57ae-4f00-9aaf-7583a9ec4abb-combined-ca-bundle\") pod \"memcached-0\" (UID: \"6f000ebf-57ae-4f00-9aaf-7583a9ec4abb\") " pod="openstack/memcached-0" Nov 22 10:55:28 crc kubenswrapper[4926]: I1122 10:55:28.091583 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cww4l\" (UniqueName: \"kubernetes.io/projected/6f000ebf-57ae-4f00-9aaf-7583a9ec4abb-kube-api-access-cww4l\") pod \"memcached-0\" (UID: \"6f000ebf-57ae-4f00-9aaf-7583a9ec4abb\") " pod="openstack/memcached-0" Nov 22 10:55:28 crc kubenswrapper[4926]: I1122 10:55:28.190006 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/memcached-0" Nov 22 10:55:29 crc kubenswrapper[4926]: I1122 10:55:29.869812 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/kube-state-metrics-0"] Nov 22 10:55:29 crc kubenswrapper[4926]: I1122 10:55:29.871062 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Nov 22 10:55:29 crc kubenswrapper[4926]: I1122 10:55:29.877144 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"telemetry-ceilometer-dockercfg-hgr67" Nov 22 10:55:29 crc kubenswrapper[4926]: I1122 10:55:29.880342 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 22 10:55:29 crc kubenswrapper[4926]: I1122 10:55:29.904747 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7l85v\" (UniqueName: \"kubernetes.io/projected/23355e5d-b81e-47b5-ac62-ee7b22c33708-kube-api-access-7l85v\") pod \"kube-state-metrics-0\" (UID: \"23355e5d-b81e-47b5-ac62-ee7b22c33708\") " pod="openstack/kube-state-metrics-0" Nov 22 10:55:30 crc kubenswrapper[4926]: I1122 10:55:30.005985 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7l85v\" (UniqueName: \"kubernetes.io/projected/23355e5d-b81e-47b5-ac62-ee7b22c33708-kube-api-access-7l85v\") pod \"kube-state-metrics-0\" (UID: \"23355e5d-b81e-47b5-ac62-ee7b22c33708\") " pod="openstack/kube-state-metrics-0" Nov 22 10:55:30 crc kubenswrapper[4926]: I1122 10:55:30.024786 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7l85v\" (UniqueName: \"kubernetes.io/projected/23355e5d-b81e-47b5-ac62-ee7b22c33708-kube-api-access-7l85v\") pod \"kube-state-metrics-0\" (UID: \"23355e5d-b81e-47b5-ac62-ee7b22c33708\") " pod="openstack/kube-state-metrics-0" Nov 22 10:55:30 crc kubenswrapper[4926]: I1122 10:55:30.191035 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Nov 22 10:55:32 crc kubenswrapper[4926]: I1122 10:55:32.979763 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-cell1-galera-0"] Nov 22 10:55:33 crc kubenswrapper[4926]: I1122 10:55:33.363834 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-pwfdl"] Nov 22 10:55:33 crc kubenswrapper[4926]: I1122 10:55:33.364997 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-pwfdl" Nov 22 10:55:33 crc kubenswrapper[4926]: I1122 10:55:33.367528 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-ovs-hrnx7"] Nov 22 10:55:33 crc kubenswrapper[4926]: I1122 10:55:33.368059 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-scripts" Nov 22 10:55:33 crc kubenswrapper[4926]: I1122 10:55:33.368217 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovncontroller-ovndbs" Nov 22 10:55:33 crc kubenswrapper[4926]: I1122 10:55:33.368326 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncontroller-ovncontroller-dockercfg-8zc2s" Nov 22 10:55:33 crc kubenswrapper[4926]: I1122 10:55:33.369168 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-ovs-hrnx7" Nov 22 10:55:33 crc kubenswrapper[4926]: I1122 10:55:33.383600 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-pwfdl"] Nov 22 10:55:33 crc kubenswrapper[4926]: I1122 10:55:33.393057 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-ovs-hrnx7"] Nov 22 10:55:33 crc kubenswrapper[4926]: I1122 10:55:33.471632 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/c35b6e33-72ea-4631-8fb0-e21ed5b6b503-etc-ovs\") pod \"ovn-controller-ovs-hrnx7\" (UID: \"c35b6e33-72ea-4631-8fb0-e21ed5b6b503\") " pod="openstack/ovn-controller-ovs-hrnx7" Nov 22 10:55:33 crc kubenswrapper[4926]: I1122 10:55:33.471673 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w6gls\" (UniqueName: \"kubernetes.io/projected/c35b6e33-72ea-4631-8fb0-e21ed5b6b503-kube-api-access-w6gls\") pod \"ovn-controller-ovs-hrnx7\" (UID: \"c35b6e33-72ea-4631-8fb0-e21ed5b6b503\") " pod="openstack/ovn-controller-ovs-hrnx7" Nov 22 10:55:33 crc kubenswrapper[4926]: I1122 10:55:33.471721 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/631757e2-e40e-4cc6-a2a3-601c749669b2-scripts\") pod \"ovn-controller-pwfdl\" (UID: \"631757e2-e40e-4cc6-a2a3-601c749669b2\") " pod="openstack/ovn-controller-pwfdl" Nov 22 10:55:33 crc kubenswrapper[4926]: I1122 10:55:33.471903 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/631757e2-e40e-4cc6-a2a3-601c749669b2-combined-ca-bundle\") pod \"ovn-controller-pwfdl\" (UID: \"631757e2-e40e-4cc6-a2a3-601c749669b2\") " pod="openstack/ovn-controller-pwfdl" Nov 22 10:55:33 crc kubenswrapper[4926]: I1122 10:55:33.471971 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/631757e2-e40e-4cc6-a2a3-601c749669b2-var-log-ovn\") pod \"ovn-controller-pwfdl\" (UID: \"631757e2-e40e-4cc6-a2a3-601c749669b2\") " pod="openstack/ovn-controller-pwfdl" Nov 22 10:55:33 crc kubenswrapper[4926]: I1122 10:55:33.472019 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/c35b6e33-72ea-4631-8fb0-e21ed5b6b503-var-run\") pod \"ovn-controller-ovs-hrnx7\" (UID: \"c35b6e33-72ea-4631-8fb0-e21ed5b6b503\") " pod="openstack/ovn-controller-ovs-hrnx7" Nov 22 10:55:33 crc kubenswrapper[4926]: I1122 10:55:33.472101 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/c35b6e33-72ea-4631-8fb0-e21ed5b6b503-var-log\") pod \"ovn-controller-ovs-hrnx7\" (UID: \"c35b6e33-72ea-4631-8fb0-e21ed5b6b503\") " pod="openstack/ovn-controller-ovs-hrnx7" Nov 22 10:55:33 crc kubenswrapper[4926]: I1122 10:55:33.472161 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/631757e2-e40e-4cc6-a2a3-601c749669b2-var-run\") pod \"ovn-controller-pwfdl\" (UID: \"631757e2-e40e-4cc6-a2a3-601c749669b2\") " pod="openstack/ovn-controller-pwfdl" Nov 22 10:55:33 crc kubenswrapper[4926]: I1122 
10:55:33.472230 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rq2qn\" (UniqueName: \"kubernetes.io/projected/631757e2-e40e-4cc6-a2a3-601c749669b2-kube-api-access-rq2qn\") pod \"ovn-controller-pwfdl\" (UID: \"631757e2-e40e-4cc6-a2a3-601c749669b2\") " pod="openstack/ovn-controller-pwfdl" Nov 22 10:55:33 crc kubenswrapper[4926]: I1122 10:55:33.472251 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/c35b6e33-72ea-4631-8fb0-e21ed5b6b503-var-lib\") pod \"ovn-controller-ovs-hrnx7\" (UID: \"c35b6e33-72ea-4631-8fb0-e21ed5b6b503\") " pod="openstack/ovn-controller-ovs-hrnx7" Nov 22 10:55:33 crc kubenswrapper[4926]: I1122 10:55:33.472350 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/c35b6e33-72ea-4631-8fb0-e21ed5b6b503-scripts\") pod \"ovn-controller-ovs-hrnx7\" (UID: \"c35b6e33-72ea-4631-8fb0-e21ed5b6b503\") " pod="openstack/ovn-controller-ovs-hrnx7" Nov 22 10:55:33 crc kubenswrapper[4926]: I1122 10:55:33.472415 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/631757e2-e40e-4cc6-a2a3-601c749669b2-ovn-controller-tls-certs\") pod \"ovn-controller-pwfdl\" (UID: \"631757e2-e40e-4cc6-a2a3-601c749669b2\") " pod="openstack/ovn-controller-pwfdl" Nov 22 10:55:33 crc kubenswrapper[4926]: I1122 10:55:33.472437 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/631757e2-e40e-4cc6-a2a3-601c749669b2-var-run-ovn\") pod \"ovn-controller-pwfdl\" (UID: \"631757e2-e40e-4cc6-a2a3-601c749669b2\") " pod="openstack/ovn-controller-pwfdl" Nov 22 10:55:33 crc kubenswrapper[4926]: I1122 10:55:33.574102 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/c35b6e33-72ea-4631-8fb0-e21ed5b6b503-var-lib\") pod \"ovn-controller-ovs-hrnx7\" (UID: \"c35b6e33-72ea-4631-8fb0-e21ed5b6b503\") " pod="openstack/ovn-controller-ovs-hrnx7" Nov 22 10:55:33 crc kubenswrapper[4926]: I1122 10:55:33.574195 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/c35b6e33-72ea-4631-8fb0-e21ed5b6b503-scripts\") pod \"ovn-controller-ovs-hrnx7\" (UID: \"c35b6e33-72ea-4631-8fb0-e21ed5b6b503\") " pod="openstack/ovn-controller-ovs-hrnx7" Nov 22 10:55:33 crc kubenswrapper[4926]: I1122 10:55:33.574225 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/631757e2-e40e-4cc6-a2a3-601c749669b2-ovn-controller-tls-certs\") pod \"ovn-controller-pwfdl\" (UID: \"631757e2-e40e-4cc6-a2a3-601c749669b2\") " pod="openstack/ovn-controller-pwfdl" Nov 22 10:55:33 crc kubenswrapper[4926]: I1122 10:55:33.574251 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/631757e2-e40e-4cc6-a2a3-601c749669b2-var-run-ovn\") pod \"ovn-controller-pwfdl\" (UID: \"631757e2-e40e-4cc6-a2a3-601c749669b2\") " pod="openstack/ovn-controller-pwfdl" Nov 22 10:55:33 crc kubenswrapper[4926]: I1122 10:55:33.574282 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for 
volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/c35b6e33-72ea-4631-8fb0-e21ed5b6b503-etc-ovs\") pod \"ovn-controller-ovs-hrnx7\" (UID: \"c35b6e33-72ea-4631-8fb0-e21ed5b6b503\") " pod="openstack/ovn-controller-ovs-hrnx7" Nov 22 10:55:33 crc kubenswrapper[4926]: I1122 10:55:33.574307 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w6gls\" (UniqueName: \"kubernetes.io/projected/c35b6e33-72ea-4631-8fb0-e21ed5b6b503-kube-api-access-w6gls\") pod \"ovn-controller-ovs-hrnx7\" (UID: \"c35b6e33-72ea-4631-8fb0-e21ed5b6b503\") " pod="openstack/ovn-controller-ovs-hrnx7" Nov 22 10:55:33 crc kubenswrapper[4926]: I1122 10:55:33.574346 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/631757e2-e40e-4cc6-a2a3-601c749669b2-scripts\") pod \"ovn-controller-pwfdl\" (UID: \"631757e2-e40e-4cc6-a2a3-601c749669b2\") " pod="openstack/ovn-controller-pwfdl" Nov 22 10:55:33 crc kubenswrapper[4926]: I1122 10:55:33.574384 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/631757e2-e40e-4cc6-a2a3-601c749669b2-combined-ca-bundle\") pod \"ovn-controller-pwfdl\" (UID: \"631757e2-e40e-4cc6-a2a3-601c749669b2\") " pod="openstack/ovn-controller-pwfdl" Nov 22 10:55:33 crc kubenswrapper[4926]: I1122 10:55:33.574413 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/631757e2-e40e-4cc6-a2a3-601c749669b2-var-log-ovn\") pod \"ovn-controller-pwfdl\" (UID: \"631757e2-e40e-4cc6-a2a3-601c749669b2\") " pod="openstack/ovn-controller-pwfdl" Nov 22 10:55:33 crc kubenswrapper[4926]: I1122 10:55:33.574442 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/c35b6e33-72ea-4631-8fb0-e21ed5b6b503-var-run\") pod \"ovn-controller-ovs-hrnx7\" (UID: \"c35b6e33-72ea-4631-8fb0-e21ed5b6b503\") " pod="openstack/ovn-controller-ovs-hrnx7" Nov 22 10:55:33 crc kubenswrapper[4926]: I1122 10:55:33.574483 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/c35b6e33-72ea-4631-8fb0-e21ed5b6b503-var-log\") pod \"ovn-controller-ovs-hrnx7\" (UID: \"c35b6e33-72ea-4631-8fb0-e21ed5b6b503\") " pod="openstack/ovn-controller-ovs-hrnx7" Nov 22 10:55:33 crc kubenswrapper[4926]: I1122 10:55:33.574517 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/631757e2-e40e-4cc6-a2a3-601c749669b2-var-run\") pod \"ovn-controller-pwfdl\" (UID: \"631757e2-e40e-4cc6-a2a3-601c749669b2\") " pod="openstack/ovn-controller-pwfdl" Nov 22 10:55:33 crc kubenswrapper[4926]: I1122 10:55:33.574553 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rq2qn\" (UniqueName: \"kubernetes.io/projected/631757e2-e40e-4cc6-a2a3-601c749669b2-kube-api-access-rq2qn\") pod \"ovn-controller-pwfdl\" (UID: \"631757e2-e40e-4cc6-a2a3-601c749669b2\") " pod="openstack/ovn-controller-pwfdl" Nov 22 10:55:33 crc kubenswrapper[4926]: I1122 10:55:33.574642 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/c35b6e33-72ea-4631-8fb0-e21ed5b6b503-var-lib\") pod \"ovn-controller-ovs-hrnx7\" (UID: \"c35b6e33-72ea-4631-8fb0-e21ed5b6b503\") " 
pod="openstack/ovn-controller-ovs-hrnx7" Nov 22 10:55:33 crc kubenswrapper[4926]: I1122 10:55:33.574679 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/631757e2-e40e-4cc6-a2a3-601c749669b2-var-run-ovn\") pod \"ovn-controller-pwfdl\" (UID: \"631757e2-e40e-4cc6-a2a3-601c749669b2\") " pod="openstack/ovn-controller-pwfdl" Nov 22 10:55:33 crc kubenswrapper[4926]: I1122 10:55:33.575521 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/631757e2-e40e-4cc6-a2a3-601c749669b2-var-log-ovn\") pod \"ovn-controller-pwfdl\" (UID: \"631757e2-e40e-4cc6-a2a3-601c749669b2\") " pod="openstack/ovn-controller-pwfdl" Nov 22 10:55:33 crc kubenswrapper[4926]: I1122 10:55:33.575629 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/c35b6e33-72ea-4631-8fb0-e21ed5b6b503-var-log\") pod \"ovn-controller-ovs-hrnx7\" (UID: \"c35b6e33-72ea-4631-8fb0-e21ed5b6b503\") " pod="openstack/ovn-controller-ovs-hrnx7" Nov 22 10:55:33 crc kubenswrapper[4926]: I1122 10:55:33.575786 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/c35b6e33-72ea-4631-8fb0-e21ed5b6b503-etc-ovs\") pod \"ovn-controller-ovs-hrnx7\" (UID: \"c35b6e33-72ea-4631-8fb0-e21ed5b6b503\") " pod="openstack/ovn-controller-ovs-hrnx7" Nov 22 10:55:33 crc kubenswrapper[4926]: I1122 10:55:33.576081 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/631757e2-e40e-4cc6-a2a3-601c749669b2-var-run\") pod \"ovn-controller-pwfdl\" (UID: \"631757e2-e40e-4cc6-a2a3-601c749669b2\") " pod="openstack/ovn-controller-pwfdl" Nov 22 10:55:33 crc kubenswrapper[4926]: I1122 10:55:33.576096 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/c35b6e33-72ea-4631-8fb0-e21ed5b6b503-var-run\") pod \"ovn-controller-ovs-hrnx7\" (UID: \"c35b6e33-72ea-4631-8fb0-e21ed5b6b503\") " pod="openstack/ovn-controller-ovs-hrnx7" Nov 22 10:55:33 crc kubenswrapper[4926]: I1122 10:55:33.577772 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/631757e2-e40e-4cc6-a2a3-601c749669b2-scripts\") pod \"ovn-controller-pwfdl\" (UID: \"631757e2-e40e-4cc6-a2a3-601c749669b2\") " pod="openstack/ovn-controller-pwfdl" Nov 22 10:55:33 crc kubenswrapper[4926]: I1122 10:55:33.577917 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/c35b6e33-72ea-4631-8fb0-e21ed5b6b503-scripts\") pod \"ovn-controller-ovs-hrnx7\" (UID: \"c35b6e33-72ea-4631-8fb0-e21ed5b6b503\") " pod="openstack/ovn-controller-ovs-hrnx7" Nov 22 10:55:33 crc kubenswrapper[4926]: I1122 10:55:33.581765 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/631757e2-e40e-4cc6-a2a3-601c749669b2-combined-ca-bundle\") pod \"ovn-controller-pwfdl\" (UID: \"631757e2-e40e-4cc6-a2a3-601c749669b2\") " pod="openstack/ovn-controller-pwfdl" Nov 22 10:55:33 crc kubenswrapper[4926]: I1122 10:55:33.586707 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/631757e2-e40e-4cc6-a2a3-601c749669b2-ovn-controller-tls-certs\") pod 
\"ovn-controller-pwfdl\" (UID: \"631757e2-e40e-4cc6-a2a3-601c749669b2\") " pod="openstack/ovn-controller-pwfdl" Nov 22 10:55:33 crc kubenswrapper[4926]: I1122 10:55:33.591195 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rq2qn\" (UniqueName: \"kubernetes.io/projected/631757e2-e40e-4cc6-a2a3-601c749669b2-kube-api-access-rq2qn\") pod \"ovn-controller-pwfdl\" (UID: \"631757e2-e40e-4cc6-a2a3-601c749669b2\") " pod="openstack/ovn-controller-pwfdl" Nov 22 10:55:33 crc kubenswrapper[4926]: I1122 10:55:33.595018 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-w6gls\" (UniqueName: \"kubernetes.io/projected/c35b6e33-72ea-4631-8fb0-e21ed5b6b503-kube-api-access-w6gls\") pod \"ovn-controller-ovs-hrnx7\" (UID: \"c35b6e33-72ea-4631-8fb0-e21ed5b6b503\") " pod="openstack/ovn-controller-ovs-hrnx7" Nov 22 10:55:33 crc kubenswrapper[4926]: I1122 10:55:33.691474 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-pwfdl" Nov 22 10:55:33 crc kubenswrapper[4926]: I1122 10:55:33.707593 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-ovs-hrnx7" Nov 22 10:55:33 crc kubenswrapper[4926]: I1122 10:55:33.817956 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovsdbserver-nb-0"] Nov 22 10:55:33 crc kubenswrapper[4926]: I1122 10:55:33.822461 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-nb-0" Nov 22 10:55:33 crc kubenswrapper[4926]: I1122 10:55:33.827181 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-nb-scripts" Nov 22 10:55:33 crc kubenswrapper[4926]: I1122 10:55:33.827443 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovn-metrics" Nov 22 10:55:33 crc kubenswrapper[4926]: I1122 10:55:33.827821 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-nb-config" Nov 22 10:55:33 crc kubenswrapper[4926]: I1122 10:55:33.828063 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncluster-ovndbcluster-nb-dockercfg-q67wg" Nov 22 10:55:33 crc kubenswrapper[4926]: I1122 10:55:33.830679 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovndbcluster-nb-ovndbs" Nov 22 10:55:33 crc kubenswrapper[4926]: I1122 10:55:33.865273 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-nb-0"] Nov 22 10:55:33 crc kubenswrapper[4926]: I1122 10:55:33.980723 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/50a6898f-08ef-48de-bcc5-35b49915cff6-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"50a6898f-08ef-48de-bcc5-35b49915cff6\") " pod="openstack/ovsdbserver-nb-0" Nov 22 10:55:33 crc kubenswrapper[4926]: I1122 10:55:33.981102 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/50a6898f-08ef-48de-bcc5-35b49915cff6-scripts\") pod \"ovsdbserver-nb-0\" (UID: \"50a6898f-08ef-48de-bcc5-35b49915cff6\") " pod="openstack/ovsdbserver-nb-0" Nov 22 10:55:33 crc kubenswrapper[4926]: I1122 10:55:33.981135 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-szgcd\" (UniqueName: 
\"kubernetes.io/projected/50a6898f-08ef-48de-bcc5-35b49915cff6-kube-api-access-szgcd\") pod \"ovsdbserver-nb-0\" (UID: \"50a6898f-08ef-48de-bcc5-35b49915cff6\") " pod="openstack/ovsdbserver-nb-0" Nov 22 10:55:33 crc kubenswrapper[4926]: I1122 10:55:33.981226 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"ovsdbserver-nb-0\" (UID: \"50a6898f-08ef-48de-bcc5-35b49915cff6\") " pod="openstack/ovsdbserver-nb-0" Nov 22 10:55:33 crc kubenswrapper[4926]: I1122 10:55:33.981282 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/50a6898f-08ef-48de-bcc5-35b49915cff6-config\") pod \"ovsdbserver-nb-0\" (UID: \"50a6898f-08ef-48de-bcc5-35b49915cff6\") " pod="openstack/ovsdbserver-nb-0" Nov 22 10:55:33 crc kubenswrapper[4926]: I1122 10:55:33.981307 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/50a6898f-08ef-48de-bcc5-35b49915cff6-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"50a6898f-08ef-48de-bcc5-35b49915cff6\") " pod="openstack/ovsdbserver-nb-0" Nov 22 10:55:33 crc kubenswrapper[4926]: I1122 10:55:33.981327 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/50a6898f-08ef-48de-bcc5-35b49915cff6-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: \"50a6898f-08ef-48de-bcc5-35b49915cff6\") " pod="openstack/ovsdbserver-nb-0" Nov 22 10:55:33 crc kubenswrapper[4926]: I1122 10:55:33.981351 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/50a6898f-08ef-48de-bcc5-35b49915cff6-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"50a6898f-08ef-48de-bcc5-35b49915cff6\") " pod="openstack/ovsdbserver-nb-0" Nov 22 10:55:34 crc kubenswrapper[4926]: I1122 10:55:34.082377 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/50a6898f-08ef-48de-bcc5-35b49915cff6-config\") pod \"ovsdbserver-nb-0\" (UID: \"50a6898f-08ef-48de-bcc5-35b49915cff6\") " pod="openstack/ovsdbserver-nb-0" Nov 22 10:55:34 crc kubenswrapper[4926]: I1122 10:55:34.082424 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/50a6898f-08ef-48de-bcc5-35b49915cff6-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"50a6898f-08ef-48de-bcc5-35b49915cff6\") " pod="openstack/ovsdbserver-nb-0" Nov 22 10:55:34 crc kubenswrapper[4926]: I1122 10:55:34.082442 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/50a6898f-08ef-48de-bcc5-35b49915cff6-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: \"50a6898f-08ef-48de-bcc5-35b49915cff6\") " pod="openstack/ovsdbserver-nb-0" Nov 22 10:55:34 crc kubenswrapper[4926]: I1122 10:55:34.082468 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/50a6898f-08ef-48de-bcc5-35b49915cff6-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: 
\"50a6898f-08ef-48de-bcc5-35b49915cff6\") " pod="openstack/ovsdbserver-nb-0" Nov 22 10:55:34 crc kubenswrapper[4926]: I1122 10:55:34.082498 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/50a6898f-08ef-48de-bcc5-35b49915cff6-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"50a6898f-08ef-48de-bcc5-35b49915cff6\") " pod="openstack/ovsdbserver-nb-0" Nov 22 10:55:34 crc kubenswrapper[4926]: I1122 10:55:34.082515 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/50a6898f-08ef-48de-bcc5-35b49915cff6-scripts\") pod \"ovsdbserver-nb-0\" (UID: \"50a6898f-08ef-48de-bcc5-35b49915cff6\") " pod="openstack/ovsdbserver-nb-0" Nov 22 10:55:34 crc kubenswrapper[4926]: I1122 10:55:34.082547 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-szgcd\" (UniqueName: \"kubernetes.io/projected/50a6898f-08ef-48de-bcc5-35b49915cff6-kube-api-access-szgcd\") pod \"ovsdbserver-nb-0\" (UID: \"50a6898f-08ef-48de-bcc5-35b49915cff6\") " pod="openstack/ovsdbserver-nb-0" Nov 22 10:55:34 crc kubenswrapper[4926]: I1122 10:55:34.082611 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"ovsdbserver-nb-0\" (UID: \"50a6898f-08ef-48de-bcc5-35b49915cff6\") " pod="openstack/ovsdbserver-nb-0" Nov 22 10:55:34 crc kubenswrapper[4926]: I1122 10:55:34.082884 4926 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"ovsdbserver-nb-0\" (UID: \"50a6898f-08ef-48de-bcc5-35b49915cff6\") device mount path \"/mnt/openstack/pv08\"" pod="openstack/ovsdbserver-nb-0" Nov 22 10:55:34 crc kubenswrapper[4926]: I1122 10:55:34.083119 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/50a6898f-08ef-48de-bcc5-35b49915cff6-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"50a6898f-08ef-48de-bcc5-35b49915cff6\") " pod="openstack/ovsdbserver-nb-0" Nov 22 10:55:34 crc kubenswrapper[4926]: I1122 10:55:34.083353 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/50a6898f-08ef-48de-bcc5-35b49915cff6-config\") pod \"ovsdbserver-nb-0\" (UID: \"50a6898f-08ef-48de-bcc5-35b49915cff6\") " pod="openstack/ovsdbserver-nb-0" Nov 22 10:55:34 crc kubenswrapper[4926]: I1122 10:55:34.083726 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/50a6898f-08ef-48de-bcc5-35b49915cff6-scripts\") pod \"ovsdbserver-nb-0\" (UID: \"50a6898f-08ef-48de-bcc5-35b49915cff6\") " pod="openstack/ovsdbserver-nb-0" Nov 22 10:55:34 crc kubenswrapper[4926]: I1122 10:55:34.088274 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/50a6898f-08ef-48de-bcc5-35b49915cff6-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"50a6898f-08ef-48de-bcc5-35b49915cff6\") " pod="openstack/ovsdbserver-nb-0" Nov 22 10:55:34 crc kubenswrapper[4926]: I1122 10:55:34.089602 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: 
\"kubernetes.io/secret/50a6898f-08ef-48de-bcc5-35b49915cff6-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"50a6898f-08ef-48de-bcc5-35b49915cff6\") " pod="openstack/ovsdbserver-nb-0" Nov 22 10:55:34 crc kubenswrapper[4926]: I1122 10:55:34.090148 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/50a6898f-08ef-48de-bcc5-35b49915cff6-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: \"50a6898f-08ef-48de-bcc5-35b49915cff6\") " pod="openstack/ovsdbserver-nb-0" Nov 22 10:55:34 crc kubenswrapper[4926]: I1122 10:55:34.116796 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-szgcd\" (UniqueName: \"kubernetes.io/projected/50a6898f-08ef-48de-bcc5-35b49915cff6-kube-api-access-szgcd\") pod \"ovsdbserver-nb-0\" (UID: \"50a6898f-08ef-48de-bcc5-35b49915cff6\") " pod="openstack/ovsdbserver-nb-0" Nov 22 10:55:34 crc kubenswrapper[4926]: I1122 10:55:34.130082 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"ovsdbserver-nb-0\" (UID: \"50a6898f-08ef-48de-bcc5-35b49915cff6\") " pod="openstack/ovsdbserver-nb-0" Nov 22 10:55:34 crc kubenswrapper[4926]: I1122 10:55:34.147389 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-nb-0" Nov 22 10:55:36 crc kubenswrapper[4926]: I1122 10:55:36.589915 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovsdbserver-sb-0"] Nov 22 10:55:36 crc kubenswrapper[4926]: I1122 10:55:36.591218 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-sb-0" Nov 22 10:55:36 crc kubenswrapper[4926]: I1122 10:55:36.595523 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovndbcluster-sb-ovndbs" Nov 22 10:55:36 crc kubenswrapper[4926]: I1122 10:55:36.596234 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-sb-config" Nov 22 10:55:36 crc kubenswrapper[4926]: I1122 10:55:36.598213 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncluster-ovndbcluster-sb-dockercfg-7f7g5" Nov 22 10:55:36 crc kubenswrapper[4926]: I1122 10:55:36.598818 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-sb-scripts" Nov 22 10:55:36 crc kubenswrapper[4926]: I1122 10:55:36.609226 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-sb-0"] Nov 22 10:55:36 crc kubenswrapper[4926]: I1122 10:55:36.732009 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/49d1aa9f-06e0-48b0-b0a7-ab459f6ed4d0-scripts\") pod \"ovsdbserver-sb-0\" (UID: \"49d1aa9f-06e0-48b0-b0a7-ab459f6ed4d0\") " pod="openstack/ovsdbserver-sb-0" Nov 22 10:55:36 crc kubenswrapper[4926]: I1122 10:55:36.732082 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/49d1aa9f-06e0-48b0-b0a7-ab459f6ed4d0-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"49d1aa9f-06e0-48b0-b0a7-ab459f6ed4d0\") " pod="openstack/ovsdbserver-sb-0" Nov 22 10:55:36 crc kubenswrapper[4926]: I1122 10:55:36.732119 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/49d1aa9f-06e0-48b0-b0a7-ab459f6ed4d0-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: \"49d1aa9f-06e0-48b0-b0a7-ab459f6ed4d0\") " pod="openstack/ovsdbserver-sb-0" Nov 22 10:55:36 crc kubenswrapper[4926]: I1122 10:55:36.732157 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-snlzd\" (UniqueName: \"kubernetes.io/projected/49d1aa9f-06e0-48b0-b0a7-ab459f6ed4d0-kube-api-access-snlzd\") pod \"ovsdbserver-sb-0\" (UID: \"49d1aa9f-06e0-48b0-b0a7-ab459f6ed4d0\") " pod="openstack/ovsdbserver-sb-0" Nov 22 10:55:36 crc kubenswrapper[4926]: I1122 10:55:36.732230 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/49d1aa9f-06e0-48b0-b0a7-ab459f6ed4d0-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: \"49d1aa9f-06e0-48b0-b0a7-ab459f6ed4d0\") " pod="openstack/ovsdbserver-sb-0" Nov 22 10:55:36 crc kubenswrapper[4926]: I1122 10:55:36.732276 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/49d1aa9f-06e0-48b0-b0a7-ab459f6ed4d0-config\") pod \"ovsdbserver-sb-0\" (UID: \"49d1aa9f-06e0-48b0-b0a7-ab459f6ed4d0\") " pod="openstack/ovsdbserver-sb-0" Nov 22 10:55:36 crc kubenswrapper[4926]: I1122 10:55:36.732294 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/49d1aa9f-06e0-48b0-b0a7-ab459f6ed4d0-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"49d1aa9f-06e0-48b0-b0a7-ab459f6ed4d0\") " pod="openstack/ovsdbserver-sb-0" Nov 22 10:55:36 crc kubenswrapper[4926]: I1122 10:55:36.732318 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"ovsdbserver-sb-0\" (UID: \"49d1aa9f-06e0-48b0-b0a7-ab459f6ed4d0\") " pod="openstack/ovsdbserver-sb-0" Nov 22 10:55:36 crc kubenswrapper[4926]: I1122 10:55:36.835042 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/49d1aa9f-06e0-48b0-b0a7-ab459f6ed4d0-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: \"49d1aa9f-06e0-48b0-b0a7-ab459f6ed4d0\") " pod="openstack/ovsdbserver-sb-0" Nov 22 10:55:36 crc kubenswrapper[4926]: I1122 10:55:36.835276 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-snlzd\" (UniqueName: \"kubernetes.io/projected/49d1aa9f-06e0-48b0-b0a7-ab459f6ed4d0-kube-api-access-snlzd\") pod \"ovsdbserver-sb-0\" (UID: \"49d1aa9f-06e0-48b0-b0a7-ab459f6ed4d0\") " pod="openstack/ovsdbserver-sb-0" Nov 22 10:55:36 crc kubenswrapper[4926]: I1122 10:55:36.835324 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/49d1aa9f-06e0-48b0-b0a7-ab459f6ed4d0-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: \"49d1aa9f-06e0-48b0-b0a7-ab459f6ed4d0\") " pod="openstack/ovsdbserver-sb-0" Nov 22 10:55:36 crc kubenswrapper[4926]: I1122 10:55:36.835364 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/49d1aa9f-06e0-48b0-b0a7-ab459f6ed4d0-config\") pod \"ovsdbserver-sb-0\" (UID: \"49d1aa9f-06e0-48b0-b0a7-ab459f6ed4d0\") 
" pod="openstack/ovsdbserver-sb-0" Nov 22 10:55:36 crc kubenswrapper[4926]: I1122 10:55:36.835389 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/49d1aa9f-06e0-48b0-b0a7-ab459f6ed4d0-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"49d1aa9f-06e0-48b0-b0a7-ab459f6ed4d0\") " pod="openstack/ovsdbserver-sb-0" Nov 22 10:55:36 crc kubenswrapper[4926]: I1122 10:55:36.835410 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"ovsdbserver-sb-0\" (UID: \"49d1aa9f-06e0-48b0-b0a7-ab459f6ed4d0\") " pod="openstack/ovsdbserver-sb-0" Nov 22 10:55:36 crc kubenswrapper[4926]: I1122 10:55:36.835441 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/49d1aa9f-06e0-48b0-b0a7-ab459f6ed4d0-scripts\") pod \"ovsdbserver-sb-0\" (UID: \"49d1aa9f-06e0-48b0-b0a7-ab459f6ed4d0\") " pod="openstack/ovsdbserver-sb-0" Nov 22 10:55:36 crc kubenswrapper[4926]: I1122 10:55:36.835467 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/49d1aa9f-06e0-48b0-b0a7-ab459f6ed4d0-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"49d1aa9f-06e0-48b0-b0a7-ab459f6ed4d0\") " pod="openstack/ovsdbserver-sb-0" Nov 22 10:55:36 crc kubenswrapper[4926]: I1122 10:55:36.836018 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/49d1aa9f-06e0-48b0-b0a7-ab459f6ed4d0-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: \"49d1aa9f-06e0-48b0-b0a7-ab459f6ed4d0\") " pod="openstack/ovsdbserver-sb-0" Nov 22 10:55:36 crc kubenswrapper[4926]: I1122 10:55:36.836216 4926 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"ovsdbserver-sb-0\" (UID: \"49d1aa9f-06e0-48b0-b0a7-ab459f6ed4d0\") device mount path \"/mnt/openstack/pv05\"" pod="openstack/ovsdbserver-sb-0" Nov 22 10:55:36 crc kubenswrapper[4926]: I1122 10:55:36.836835 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/49d1aa9f-06e0-48b0-b0a7-ab459f6ed4d0-config\") pod \"ovsdbserver-sb-0\" (UID: \"49d1aa9f-06e0-48b0-b0a7-ab459f6ed4d0\") " pod="openstack/ovsdbserver-sb-0" Nov 22 10:55:36 crc kubenswrapper[4926]: I1122 10:55:36.838002 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/49d1aa9f-06e0-48b0-b0a7-ab459f6ed4d0-scripts\") pod \"ovsdbserver-sb-0\" (UID: \"49d1aa9f-06e0-48b0-b0a7-ab459f6ed4d0\") " pod="openstack/ovsdbserver-sb-0" Nov 22 10:55:36 crc kubenswrapper[4926]: I1122 10:55:36.840353 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/49d1aa9f-06e0-48b0-b0a7-ab459f6ed4d0-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: \"49d1aa9f-06e0-48b0-b0a7-ab459f6ed4d0\") " pod="openstack/ovsdbserver-sb-0" Nov 22 10:55:36 crc kubenswrapper[4926]: I1122 10:55:36.841355 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/49d1aa9f-06e0-48b0-b0a7-ab459f6ed4d0-metrics-certs-tls-certs\") pod 
\"ovsdbserver-sb-0\" (UID: \"49d1aa9f-06e0-48b0-b0a7-ab459f6ed4d0\") " pod="openstack/ovsdbserver-sb-0" Nov 22 10:55:36 crc kubenswrapper[4926]: I1122 10:55:36.851964 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/49d1aa9f-06e0-48b0-b0a7-ab459f6ed4d0-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"49d1aa9f-06e0-48b0-b0a7-ab459f6ed4d0\") " pod="openstack/ovsdbserver-sb-0" Nov 22 10:55:36 crc kubenswrapper[4926]: I1122 10:55:36.852311 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-snlzd\" (UniqueName: \"kubernetes.io/projected/49d1aa9f-06e0-48b0-b0a7-ab459f6ed4d0-kube-api-access-snlzd\") pod \"ovsdbserver-sb-0\" (UID: \"49d1aa9f-06e0-48b0-b0a7-ab459f6ed4d0\") " pod="openstack/ovsdbserver-sb-0" Nov 22 10:55:36 crc kubenswrapper[4926]: I1122 10:55:36.865579 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"ovsdbserver-sb-0\" (UID: \"49d1aa9f-06e0-48b0-b0a7-ab459f6ed4d0\") " pod="openstack/ovsdbserver-sb-0" Nov 22 10:55:36 crc kubenswrapper[4926]: I1122 10:55:36.912982 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-sb-0" Nov 22 10:55:37 crc kubenswrapper[4926]: I1122 10:55:37.591835 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"01026c46-6589-4761-80f4-8bb210d71fd9","Type":"ContainerStarted","Data":"204e04c8dcffda193727986460aabd7359468b49f183bc6477142422d8667e2a"} Nov 22 10:55:37 crc kubenswrapper[4926]: I1122 10:55:37.931744 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/memcached-0"] Nov 22 10:55:38 crc kubenswrapper[4926]: E1122 10:55:38.375577 4926 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified" Nov 22 10:55:38 crc kubenswrapper[4926]: E1122 10:55:38.375971 4926 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:init,Image:quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries 
--test],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:ndfhb5h667h568h584h5f9h58dh565h664h587h597h577h64bh5c4h66fh647hbdh68ch5c5h68dh686h5f7h64hd7hc6h55fh57bh98h57fh87h5fh57fq,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:dns-svc,ReadOnly:true,MountPath:/etc/dnsmasq.d/hosts/dns-svc,SubPath:dns-svc,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-f87dt,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-78dd6ddcc-b7srl_openstack(5b1fdcd2-3cc9-4dc1-949e-7189999a0f53): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 22 10:55:38 crc kubenswrapper[4926]: E1122 10:55:38.378075 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/dnsmasq-dns-78dd6ddcc-b7srl" podUID="5b1fdcd2-3cc9-4dc1-949e-7189999a0f53" Nov 22 10:55:38 crc kubenswrapper[4926]: W1122 10:55:38.402437 4926 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod6f000ebf_57ae_4f00_9aaf_7583a9ec4abb.slice/crio-5c501f35303b7b6498ff6cb80747d5c3f6e2fc2ddc7df3c7281af9660a814198 WatchSource:0}: Error finding container 5c501f35303b7b6498ff6cb80747d5c3f6e2fc2ddc7df3c7281af9660a814198: Status 404 returned error can't find the container with id 5c501f35303b7b6498ff6cb80747d5c3f6e2fc2ddc7df3c7281af9660a814198 Nov 22 10:55:38 crc kubenswrapper[4926]: E1122 10:55:38.424307 4926 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified" Nov 22 10:55:38 crc kubenswrapper[4926]: E1122 10:55:38.425498 4926 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:init,Image:quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv 
--bogus-priv --log-queries --test],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:nffh5bdhf4h5f8h79h55h77h58fh56dh7bh6fh578hbch55dh68h56bhd9h65dh57ch658hc9h566h666h688h58h65dh684h5d7h6ch575h5d6h88q,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-8lsmr,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-675f4bcbfc-5nzzp_openstack(a7cfb8ad-ae0a-413d-a2fd-f7697aaca2fe): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 22 10:55:38 crc kubenswrapper[4926]: E1122 10:55:38.426643 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/dnsmasq-dns-675f4bcbfc-5nzzp" podUID="a7cfb8ad-ae0a-413d-a2fd-f7697aaca2fe" Nov 22 10:55:38 crc kubenswrapper[4926]: I1122 10:55:38.673052 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/memcached-0" event={"ID":"6f000ebf-57ae-4f00-9aaf-7583a9ec4abb","Type":"ContainerStarted","Data":"5c501f35303b7b6498ff6cb80747d5c3f6e2fc2ddc7df3c7281af9660a814198"} Nov 22 10:55:38 crc kubenswrapper[4926]: I1122 10:55:38.787226 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5ccc8479f9-s7wk2"] Nov 22 10:55:38 crc kubenswrapper[4926]: W1122 10:55:38.802878 4926 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod99e8179f_b7fe_402c_bbb6_fcb8b0ab1abb.slice/crio-accda89db61367c9b42a9ba935ee3143053cfcb08e38846d3800e9fb1bded8fc WatchSource:0}: Error finding container accda89db61367c9b42a9ba935ee3143053cfcb08e38846d3800e9fb1bded8fc: Status 404 returned error can't find the container with id accda89db61367c9b42a9ba935ee3143053cfcb08e38846d3800e9fb1bded8fc Nov 22 10:55:39 crc kubenswrapper[4926]: I1122 10:55:39.050596 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-675f4bcbfc-5nzzp" Nov 22 10:55:39 crc kubenswrapper[4926]: I1122 10:55:39.058200 4926 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-78dd6ddcc-b7srl" Nov 22 10:55:39 crc kubenswrapper[4926]: I1122 10:55:39.172079 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-f87dt\" (UniqueName: \"kubernetes.io/projected/5b1fdcd2-3cc9-4dc1-949e-7189999a0f53-kube-api-access-f87dt\") pod \"5b1fdcd2-3cc9-4dc1-949e-7189999a0f53\" (UID: \"5b1fdcd2-3cc9-4dc1-949e-7189999a0f53\") " Nov 22 10:55:39 crc kubenswrapper[4926]: I1122 10:55:39.172166 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a7cfb8ad-ae0a-413d-a2fd-f7697aaca2fe-config\") pod \"a7cfb8ad-ae0a-413d-a2fd-f7697aaca2fe\" (UID: \"a7cfb8ad-ae0a-413d-a2fd-f7697aaca2fe\") " Nov 22 10:55:39 crc kubenswrapper[4926]: I1122 10:55:39.172188 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5b1fdcd2-3cc9-4dc1-949e-7189999a0f53-config\") pod \"5b1fdcd2-3cc9-4dc1-949e-7189999a0f53\" (UID: \"5b1fdcd2-3cc9-4dc1-949e-7189999a0f53\") " Nov 22 10:55:39 crc kubenswrapper[4926]: I1122 10:55:39.172212 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8lsmr\" (UniqueName: \"kubernetes.io/projected/a7cfb8ad-ae0a-413d-a2fd-f7697aaca2fe-kube-api-access-8lsmr\") pod \"a7cfb8ad-ae0a-413d-a2fd-f7697aaca2fe\" (UID: \"a7cfb8ad-ae0a-413d-a2fd-f7697aaca2fe\") " Nov 22 10:55:39 crc kubenswrapper[4926]: I1122 10:55:39.172292 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/5b1fdcd2-3cc9-4dc1-949e-7189999a0f53-dns-svc\") pod \"5b1fdcd2-3cc9-4dc1-949e-7189999a0f53\" (UID: \"5b1fdcd2-3cc9-4dc1-949e-7189999a0f53\") " Nov 22 10:55:39 crc kubenswrapper[4926]: I1122 10:55:39.173691 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a7cfb8ad-ae0a-413d-a2fd-f7697aaca2fe-config" (OuterVolumeSpecName: "config") pod "a7cfb8ad-ae0a-413d-a2fd-f7697aaca2fe" (UID: "a7cfb8ad-ae0a-413d-a2fd-f7697aaca2fe"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:55:39 crc kubenswrapper[4926]: I1122 10:55:39.173715 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5b1fdcd2-3cc9-4dc1-949e-7189999a0f53-config" (OuterVolumeSpecName: "config") pod "5b1fdcd2-3cc9-4dc1-949e-7189999a0f53" (UID: "5b1fdcd2-3cc9-4dc1-949e-7189999a0f53"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:55:39 crc kubenswrapper[4926]: I1122 10:55:39.174045 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5b1fdcd2-3cc9-4dc1-949e-7189999a0f53-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "5b1fdcd2-3cc9-4dc1-949e-7189999a0f53" (UID: "5b1fdcd2-3cc9-4dc1-949e-7189999a0f53"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:55:39 crc kubenswrapper[4926]: I1122 10:55:39.179053 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a7cfb8ad-ae0a-413d-a2fd-f7697aaca2fe-kube-api-access-8lsmr" (OuterVolumeSpecName: "kube-api-access-8lsmr") pod "a7cfb8ad-ae0a-413d-a2fd-f7697aaca2fe" (UID: "a7cfb8ad-ae0a-413d-a2fd-f7697aaca2fe"). InnerVolumeSpecName "kube-api-access-8lsmr". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:55:39 crc kubenswrapper[4926]: I1122 10:55:39.180110 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5b1fdcd2-3cc9-4dc1-949e-7189999a0f53-kube-api-access-f87dt" (OuterVolumeSpecName: "kube-api-access-f87dt") pod "5b1fdcd2-3cc9-4dc1-949e-7189999a0f53" (UID: "5b1fdcd2-3cc9-4dc1-949e-7189999a0f53"). InnerVolumeSpecName "kube-api-access-f87dt". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:55:39 crc kubenswrapper[4926]: I1122 10:55:39.274449 4926 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/5b1fdcd2-3cc9-4dc1-949e-7189999a0f53-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 22 10:55:39 crc kubenswrapper[4926]: I1122 10:55:39.274492 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-f87dt\" (UniqueName: \"kubernetes.io/projected/5b1fdcd2-3cc9-4dc1-949e-7189999a0f53-kube-api-access-f87dt\") on node \"crc\" DevicePath \"\"" Nov 22 10:55:39 crc kubenswrapper[4926]: I1122 10:55:39.274505 4926 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a7cfb8ad-ae0a-413d-a2fd-f7697aaca2fe-config\") on node \"crc\" DevicePath \"\"" Nov 22 10:55:39 crc kubenswrapper[4926]: I1122 10:55:39.274513 4926 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5b1fdcd2-3cc9-4dc1-949e-7189999a0f53-config\") on node \"crc\" DevicePath \"\"" Nov 22 10:55:39 crc kubenswrapper[4926]: I1122 10:55:39.274523 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8lsmr\" (UniqueName: \"kubernetes.io/projected/a7cfb8ad-ae0a-413d-a2fd-f7697aaca2fe-kube-api-access-8lsmr\") on node \"crc\" DevicePath \"\"" Nov 22 10:55:39 crc kubenswrapper[4926]: I1122 10:55:39.403764 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-galera-0"] Nov 22 10:55:39 crc kubenswrapper[4926]: I1122 10:55:39.422666 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-22bgs"] Nov 22 10:55:39 crc kubenswrapper[4926]: I1122 10:55:39.434807 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 22 10:55:39 crc kubenswrapper[4926]: I1122 10:55:39.442758 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Nov 22 10:55:39 crc kubenswrapper[4926]: I1122 10:55:39.446333 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 22 10:55:39 crc kubenswrapper[4926]: W1122 10:55:39.454305 4926 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod94fdd08c_2339_4d12_90bf_fbd407185f34.slice/crio-cfdf169a52d45f70e90f4a54b6dd9a329efa00448e3b7f572d2a215c39538fdd WatchSource:0}: Error finding container cfdf169a52d45f70e90f4a54b6dd9a329efa00448e3b7f572d2a215c39538fdd: Status 404 returned error can't find the container with id cfdf169a52d45f70e90f4a54b6dd9a329efa00448e3b7f572d2a215c39538fdd Nov 22 10:55:39 crc kubenswrapper[4926]: I1122 10:55:39.457121 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-pwfdl"] Nov 22 10:55:39 crc kubenswrapper[4926]: W1122 10:55:39.462017 4926 manager.go:1169] Failed to process watch event {EventType:0 
Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod631757e2_e40e_4cc6_a2a3_601c749669b2.slice/crio-01dd000f875bc4973366612e1a2dadba39c6c7b948147cc767954cd70d17c76c WatchSource:0}: Error finding container 01dd000f875bc4973366612e1a2dadba39c6c7b948147cc767954cd70d17c76c: Status 404 returned error can't find the container with id 01dd000f875bc4973366612e1a2dadba39c6c7b948147cc767954cd70d17c76c Nov 22 10:55:39 crc kubenswrapper[4926]: I1122 10:55:39.559544 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-sb-0"] Nov 22 10:55:39 crc kubenswrapper[4926]: W1122 10:55:39.579741 4926 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod49d1aa9f_06e0_48b0_b0a7_ab459f6ed4d0.slice/crio-92f3c3c7679843b81229af1669a5e2035ea542cc64ade0a13058dc3024575dcf WatchSource:0}: Error finding container 92f3c3c7679843b81229af1669a5e2035ea542cc64ade0a13058dc3024575dcf: Status 404 returned error can't find the container with id 92f3c3c7679843b81229af1669a5e2035ea542cc64ade0a13058dc3024575dcf Nov 22 10:55:39 crc kubenswrapper[4926]: I1122 10:55:39.609804 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"12c9d69e-19d9-4c70-a080-c3d3fe0c33ce","Type":"ContainerStarted","Data":"bd0a4bd4b8f9eee343783720cbb0a43220efd18bffa8ac140ff949fcddfbada1"} Nov 22 10:55:39 crc kubenswrapper[4926]: I1122 10:55:39.619510 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"23355e5d-b81e-47b5-ac62-ee7b22c33708","Type":"ContainerStarted","Data":"f9e2e3d3a31330d06cf229cf68801aa045e4a9d244a781e87a7a8b8aa1a6b5cf"} Nov 22 10:55:39 crc kubenswrapper[4926]: I1122 10:55:39.625781 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-675f4bcbfc-5nzzp" event={"ID":"a7cfb8ad-ae0a-413d-a2fd-f7697aaca2fe","Type":"ContainerDied","Data":"54f38416b2b44e5a81885fc4c160cd6cefa7a25c0d11eb04ff50bc02952438de"} Nov 22 10:55:39 crc kubenswrapper[4926]: I1122 10:55:39.625921 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-675f4bcbfc-5nzzp" Nov 22 10:55:39 crc kubenswrapper[4926]: I1122 10:55:39.626997 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-ovs-hrnx7"] Nov 22 10:55:39 crc kubenswrapper[4926]: I1122 10:55:39.627961 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-78dd6ddcc-b7srl" event={"ID":"5b1fdcd2-3cc9-4dc1-949e-7189999a0f53","Type":"ContainerDied","Data":"cd362712c21ac6765b547392e34362f6be34bcf1055d552a7e8ba2025f5b7036"} Nov 22 10:55:39 crc kubenswrapper[4926]: I1122 10:55:39.628078 4926 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-78dd6ddcc-b7srl" Nov 22 10:55:39 crc kubenswrapper[4926]: I1122 10:55:39.638495 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5ccc8479f9-s7wk2" event={"ID":"99e8179f-b7fe-402c-bbb6-fcb8b0ab1abb","Type":"ContainerStarted","Data":"accda89db61367c9b42a9ba935ee3143053cfcb08e38846d3800e9fb1bded8fc"} Nov 22 10:55:39 crc kubenswrapper[4926]: I1122 10:55:39.641496 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"cb1c4cdf-86c1-4770-b406-87cb1ea92552","Type":"ContainerStarted","Data":"0b419579f8605414e46f947c15d8d5648d45958e9445d4c27227b6294a4dbf1a"} Nov 22 10:55:39 crc kubenswrapper[4926]: I1122 10:55:39.642851 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"94fdd08c-2339-4d12-90bf-fbd407185f34","Type":"ContainerStarted","Data":"cfdf169a52d45f70e90f4a54b6dd9a329efa00448e3b7f572d2a215c39538fdd"} Nov 22 10:55:39 crc kubenswrapper[4926]: I1122 10:55:39.644847 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"49d1aa9f-06e0-48b0-b0a7-ab459f6ed4d0","Type":"ContainerStarted","Data":"92f3c3c7679843b81229af1669a5e2035ea542cc64ade0a13058dc3024575dcf"} Nov 22 10:55:39 crc kubenswrapper[4926]: I1122 10:55:39.646969 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-pwfdl" event={"ID":"631757e2-e40e-4cc6-a2a3-601c749669b2","Type":"ContainerStarted","Data":"01dd000f875bc4973366612e1a2dadba39c6c7b948147cc767954cd70d17c76c"} Nov 22 10:55:39 crc kubenswrapper[4926]: I1122 10:55:39.648567 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57d769cc4f-22bgs" event={"ID":"62b8478a-cfa9-4952-a402-bc004da81057","Type":"ContainerStarted","Data":"bbda5f4b00bbf6d4c16588325e213160685cd18a19c4a466a613ee726e43e4be"} Nov 22 10:55:39 crc kubenswrapper[4926]: I1122 10:55:39.661185 4926 patch_prober.go:28] interesting pod/machine-config-daemon-xr9nd container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 22 10:55:39 crc kubenswrapper[4926]: I1122 10:55:39.661240 4926 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" podUID="d4977b14-85c3-4141-9b15-1768f09e8d27" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 22 10:55:39 crc kubenswrapper[4926]: I1122 10:55:39.677602 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-5nzzp"] Nov 22 10:55:39 crc kubenswrapper[4926]: I1122 10:55:39.683803 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-5nzzp"] Nov 22 10:55:39 crc kubenswrapper[4926]: I1122 10:55:39.715576 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-b7srl"] Nov 22 10:55:39 crc kubenswrapper[4926]: I1122 10:55:39.721025 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-b7srl"] Nov 22 10:55:40 crc kubenswrapper[4926]: W1122 10:55:40.324970 4926 manager.go:1169] Failed to process watch event {EventType:0 
Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc35b6e33_72ea_4631_8fb0_e21ed5b6b503.slice/crio-36a5e8abeb78ebb3c62c07d99b6bb6bbe8743ec5529113d21063d6c83d7720db WatchSource:0}: Error finding container 36a5e8abeb78ebb3c62c07d99b6bb6bbe8743ec5529113d21063d6c83d7720db: Status 404 returned error can't find the container with id 36a5e8abeb78ebb3c62c07d99b6bb6bbe8743ec5529113d21063d6c83d7720db Nov 22 10:55:40 crc kubenswrapper[4926]: I1122 10:55:40.472420 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-nb-0"] Nov 22 10:55:40 crc kubenswrapper[4926]: I1122 10:55:40.594597 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5b1fdcd2-3cc9-4dc1-949e-7189999a0f53" path="/var/lib/kubelet/pods/5b1fdcd2-3cc9-4dc1-949e-7189999a0f53/volumes" Nov 22 10:55:40 crc kubenswrapper[4926]: I1122 10:55:40.595009 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a7cfb8ad-ae0a-413d-a2fd-f7697aaca2fe" path="/var/lib/kubelet/pods/a7cfb8ad-ae0a-413d-a2fd-f7697aaca2fe/volumes" Nov 22 10:55:40 crc kubenswrapper[4926]: I1122 10:55:40.675820 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/memcached-0" event={"ID":"6f000ebf-57ae-4f00-9aaf-7583a9ec4abb","Type":"ContainerStarted","Data":"cd60101e2f51f31d21eb7f7998bddf8b02e976a488db366af7a31840d9912ffc"} Nov 22 10:55:40 crc kubenswrapper[4926]: I1122 10:55:40.676776 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/memcached-0" Nov 22 10:55:40 crc kubenswrapper[4926]: I1122 10:55:40.683436 4926 generic.go:334] "Generic (PLEG): container finished" podID="62b8478a-cfa9-4952-a402-bc004da81057" containerID="02733bc85223a7194e565076362b35053c23e642464f8274921db603b95a3dd9" exitCode=0 Nov 22 10:55:40 crc kubenswrapper[4926]: I1122 10:55:40.683677 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57d769cc4f-22bgs" event={"ID":"62b8478a-cfa9-4952-a402-bc004da81057","Type":"ContainerDied","Data":"02733bc85223a7194e565076362b35053c23e642464f8274921db603b95a3dd9"} Nov 22 10:55:40 crc kubenswrapper[4926]: I1122 10:55:40.685711 4926 generic.go:334] "Generic (PLEG): container finished" podID="99e8179f-b7fe-402c-bbb6-fcb8b0ab1abb" containerID="e56180d4c5344ea74cfaf216d4437e66efc7343313959a311e8afba542f5bc84" exitCode=0 Nov 22 10:55:40 crc kubenswrapper[4926]: I1122 10:55:40.685805 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5ccc8479f9-s7wk2" event={"ID":"99e8179f-b7fe-402c-bbb6-fcb8b0ab1abb","Type":"ContainerDied","Data":"e56180d4c5344ea74cfaf216d4437e66efc7343313959a311e8afba542f5bc84"} Nov 22 10:55:40 crc kubenswrapper[4926]: I1122 10:55:40.687587 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"50a6898f-08ef-48de-bcc5-35b49915cff6","Type":"ContainerStarted","Data":"d0139391445128c3ce18927260c4b72aeacbaecb7c4850a2f011cabe369b129a"} Nov 22 10:55:40 crc kubenswrapper[4926]: I1122 10:55:40.690074 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-hrnx7" event={"ID":"c35b6e33-72ea-4631-8fb0-e21ed5b6b503","Type":"ContainerStarted","Data":"36a5e8abeb78ebb3c62c07d99b6bb6bbe8743ec5529113d21063d6c83d7720db"} Nov 22 10:55:40 crc kubenswrapper[4926]: I1122 10:55:40.838769 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/memcached-0" podStartSLOduration=11.879549211 podStartE2EDuration="13.838750042s" podCreationTimestamp="2025-11-22 10:55:27 +0000 
UTC" firstStartedPulling="2025-11-22 10:55:38.409134482 +0000 UTC m=+958.710739759" lastFinishedPulling="2025-11-22 10:55:40.368335303 +0000 UTC m=+960.669940590" observedRunningTime="2025-11-22 10:55:40.835044376 +0000 UTC m=+961.136649663" watchObservedRunningTime="2025-11-22 10:55:40.838750042 +0000 UTC m=+961.140355339" Nov 22 10:55:46 crc kubenswrapper[4926]: I1122 10:55:46.731092 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"50a6898f-08ef-48de-bcc5-35b49915cff6","Type":"ContainerStarted","Data":"0d2118ea69ccf69e84fff07f8f2e19b4e2a796890f280b937741a8084f9ec972"} Nov 22 10:55:46 crc kubenswrapper[4926]: I1122 10:55:46.732553 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"01026c46-6589-4761-80f4-8bb210d71fd9","Type":"ContainerStarted","Data":"1c9bb378529f326c403ce078f4a5734367b49619cd73ea73287bbcbcef50c156"} Nov 22 10:55:46 crc kubenswrapper[4926]: I1122 10:55:46.734369 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"49d1aa9f-06e0-48b0-b0a7-ab459f6ed4d0","Type":"ContainerStarted","Data":"b7181bbaf7aa083caeb0f56a796578f9e72fc2d120ec2ee69599540a400ea9cb"} Nov 22 10:55:46 crc kubenswrapper[4926]: I1122 10:55:46.736544 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57d769cc4f-22bgs" event={"ID":"62b8478a-cfa9-4952-a402-bc004da81057","Type":"ContainerStarted","Data":"b60c12cb4b2554db3152f11ebc0424ace0ba1c0c50ccf31c17e3538094ac0b12"} Nov 22 10:55:46 crc kubenswrapper[4926]: I1122 10:55:46.736698 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-57d769cc4f-22bgs" Nov 22 10:55:46 crc kubenswrapper[4926]: I1122 10:55:46.739469 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"94fdd08c-2339-4d12-90bf-fbd407185f34","Type":"ContainerStarted","Data":"b6244eb35022859266cd27c848eadf207b9445b1a6b676bf31d28f8c2c7b0d9d"} Nov 22 10:55:46 crc kubenswrapper[4926]: I1122 10:55:46.743990 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5ccc8479f9-s7wk2" event={"ID":"99e8179f-b7fe-402c-bbb6-fcb8b0ab1abb","Type":"ContainerStarted","Data":"65b6e30d2e74694d237d419ae574205499026b6ae9140631d58ae2b809f03378"} Nov 22 10:55:46 crc kubenswrapper[4926]: I1122 10:55:46.744217 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-5ccc8479f9-s7wk2" Nov 22 10:55:46 crc kubenswrapper[4926]: I1122 10:55:46.799313 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-5ccc8479f9-s7wk2" podStartSLOduration=23.133660067 podStartE2EDuration="23.799291026s" podCreationTimestamp="2025-11-22 10:55:23 +0000 UTC" firstStartedPulling="2025-11-22 10:55:38.805087062 +0000 UTC m=+959.106692359" lastFinishedPulling="2025-11-22 10:55:39.470718031 +0000 UTC m=+959.772323318" observedRunningTime="2025-11-22 10:55:46.796826416 +0000 UTC m=+967.098431703" watchObservedRunningTime="2025-11-22 10:55:46.799291026 +0000 UTC m=+967.100896313" Nov 22 10:55:46 crc kubenswrapper[4926]: I1122 10:55:46.812058 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-57d769cc4f-22bgs" podStartSLOduration=23.812032731 podStartE2EDuration="23.812032731s" podCreationTimestamp="2025-11-22 10:55:23 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 
+0000 UTC" observedRunningTime="2025-11-22 10:55:46.810151917 +0000 UTC m=+967.111757204" watchObservedRunningTime="2025-11-22 10:55:46.812032731 +0000 UTC m=+967.113638018" Nov 22 10:55:47 crc kubenswrapper[4926]: I1122 10:55:47.754815 4926 generic.go:334] "Generic (PLEG): container finished" podID="c35b6e33-72ea-4631-8fb0-e21ed5b6b503" containerID="10dc67b15b06ff74e0978b8b15eb564032d75d433b0a32cc6b011a2d564cafd3" exitCode=0 Nov 22 10:55:47 crc kubenswrapper[4926]: I1122 10:55:47.754934 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-hrnx7" event={"ID":"c35b6e33-72ea-4631-8fb0-e21ed5b6b503","Type":"ContainerDied","Data":"10dc67b15b06ff74e0978b8b15eb564032d75d433b0a32cc6b011a2d564cafd3"} Nov 22 10:55:47 crc kubenswrapper[4926]: I1122 10:55:47.762526 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"12c9d69e-19d9-4c70-a080-c3d3fe0c33ce","Type":"ContainerStarted","Data":"2b35ef8decf1054a6f8673335a0252ebd211d1a9e3f03b61a81ed88a5631be85"} Nov 22 10:55:47 crc kubenswrapper[4926]: I1122 10:55:47.764406 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"23355e5d-b81e-47b5-ac62-ee7b22c33708","Type":"ContainerStarted","Data":"adc75b77dd0d66b919725530cfd81ad0eb550544613815b661b1ef464620d1c5"} Nov 22 10:55:47 crc kubenswrapper[4926]: I1122 10:55:47.764475 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/kube-state-metrics-0" Nov 22 10:55:47 crc kubenswrapper[4926]: I1122 10:55:47.766504 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-pwfdl" event={"ID":"631757e2-e40e-4cc6-a2a3-601c749669b2","Type":"ContainerStarted","Data":"1c557145a0501c3cbe707f8e24ec384a033e73fe4ea5dc2784fc8cf3751af5fb"} Nov 22 10:55:47 crc kubenswrapper[4926]: I1122 10:55:47.812533 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-pwfdl" podStartSLOduration=8.729227719 podStartE2EDuration="14.812515053s" podCreationTimestamp="2025-11-22 10:55:33 +0000 UTC" firstStartedPulling="2025-11-22 10:55:39.464306078 +0000 UTC m=+959.765911365" lastFinishedPulling="2025-11-22 10:55:45.547593412 +0000 UTC m=+965.849198699" observedRunningTime="2025-11-22 10:55:47.811652149 +0000 UTC m=+968.113257446" watchObservedRunningTime="2025-11-22 10:55:47.812515053 +0000 UTC m=+968.114120340" Nov 22 10:55:47 crc kubenswrapper[4926]: I1122 10:55:47.817241 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/kube-state-metrics-0" podStartSLOduration=11.888488064 podStartE2EDuration="18.817228828s" podCreationTimestamp="2025-11-22 10:55:29 +0000 UTC" firstStartedPulling="2025-11-22 10:55:39.477844065 +0000 UTC m=+959.779449352" lastFinishedPulling="2025-11-22 10:55:46.406584829 +0000 UTC m=+966.708190116" observedRunningTime="2025-11-22 10:55:47.795206529 +0000 UTC m=+968.096811836" watchObservedRunningTime="2025-11-22 10:55:47.817228828 +0000 UTC m=+968.118834115" Nov 22 10:55:48 crc kubenswrapper[4926]: I1122 10:55:48.191175 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/memcached-0" Nov 22 10:55:48 crc kubenswrapper[4926]: I1122 10:55:48.692089 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-controller-pwfdl" Nov 22 10:55:48 crc kubenswrapper[4926]: I1122 10:55:48.774961 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" 
event={"ID":"cb1c4cdf-86c1-4770-b406-87cb1ea92552","Type":"ContainerStarted","Data":"6afc1ba4f910d558dd79a31516f29391702084f519cdd3bc3789a3258e780b82"} Nov 22 10:55:48 crc kubenswrapper[4926]: I1122 10:55:48.777691 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-hrnx7" event={"ID":"c35b6e33-72ea-4631-8fb0-e21ed5b6b503","Type":"ContainerStarted","Data":"502649d7fe9056b0aeba7beced1fe241c68b55d719b141ad0e197c8117f4ad74"} Nov 22 10:55:49 crc kubenswrapper[4926]: I1122 10:55:49.787781 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-hrnx7" event={"ID":"c35b6e33-72ea-4631-8fb0-e21ed5b6b503","Type":"ContainerStarted","Data":"86b9325841b061e09b345ce567a829ee78f50be504656f9cd81dbc7ba601ec05"} Nov 22 10:55:49 crc kubenswrapper[4926]: I1122 10:55:49.788299 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-controller-ovs-hrnx7" Nov 22 10:55:49 crc kubenswrapper[4926]: I1122 10:55:49.790390 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"50a6898f-08ef-48de-bcc5-35b49915cff6","Type":"ContainerStarted","Data":"7fc5e3e31d22160d798dc071bb697adcb99cbd83bb128b8f74f9f716734f21c9"} Nov 22 10:55:49 crc kubenswrapper[4926]: I1122 10:55:49.792638 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"49d1aa9f-06e0-48b0-b0a7-ab459f6ed4d0","Type":"ContainerStarted","Data":"a127a55eaa0c7f0d2a2bc69fe761511849e2a835f706f8a158b039539a8b0a4c"} Nov 22 10:55:49 crc kubenswrapper[4926]: I1122 10:55:49.808990 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-ovs-hrnx7" podStartSLOduration=11.978423620000001 podStartE2EDuration="16.808969169s" podCreationTimestamp="2025-11-22 10:55:33 +0000 UTC" firstStartedPulling="2025-11-22 10:55:40.360492739 +0000 UTC m=+960.662098036" lastFinishedPulling="2025-11-22 10:55:45.191038288 +0000 UTC m=+965.492643585" observedRunningTime="2025-11-22 10:55:49.806568551 +0000 UTC m=+970.108173838" watchObservedRunningTime="2025-11-22 10:55:49.808969169 +0000 UTC m=+970.110574456" Nov 22 10:55:49 crc kubenswrapper[4926]: I1122 10:55:49.829346 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovsdbserver-nb-0" podStartSLOduration=8.894560608 podStartE2EDuration="17.829329231s" podCreationTimestamp="2025-11-22 10:55:32 +0000 UTC" firstStartedPulling="2025-11-22 10:55:40.52875072 +0000 UTC m=+960.830356007" lastFinishedPulling="2025-11-22 10:55:49.463519343 +0000 UTC m=+969.765124630" observedRunningTime="2025-11-22 10:55:49.825962605 +0000 UTC m=+970.127567902" watchObservedRunningTime="2025-11-22 10:55:49.829329231 +0000 UTC m=+970.130934518" Nov 22 10:55:49 crc kubenswrapper[4926]: I1122 10:55:49.847188 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovsdbserver-sb-0" podStartSLOduration=4.98276842 podStartE2EDuration="14.847171031s" podCreationTimestamp="2025-11-22 10:55:35 +0000 UTC" firstStartedPulling="2025-11-22 10:55:39.584803713 +0000 UTC m=+959.886409000" lastFinishedPulling="2025-11-22 10:55:49.449206334 +0000 UTC m=+969.750811611" observedRunningTime="2025-11-22 10:55:49.84117576 +0000 UTC m=+970.142781157" watchObservedRunningTime="2025-11-22 10:55:49.847171031 +0000 UTC m=+970.148776308" Nov 22 10:55:50 crc kubenswrapper[4926]: I1122 10:55:50.259103 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" 
pods=["openstack/dnsmasq-dns-57d769cc4f-22bgs"] Nov 22 10:55:50 crc kubenswrapper[4926]: I1122 10:55:50.259332 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-57d769cc4f-22bgs" podUID="62b8478a-cfa9-4952-a402-bc004da81057" containerName="dnsmasq-dns" containerID="cri-o://b60c12cb4b2554db3152f11ebc0424ace0ba1c0c50ccf31c17e3538094ac0b12" gracePeriod=10 Nov 22 10:55:50 crc kubenswrapper[4926]: I1122 10:55:50.289465 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-7cb5889db5-cjghc"] Nov 22 10:55:50 crc kubenswrapper[4926]: I1122 10:55:50.290705 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7cb5889db5-cjghc" Nov 22 10:55:50 crc kubenswrapper[4926]: I1122 10:55:50.332959 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-7cb5889db5-cjghc"] Nov 22 10:55:50 crc kubenswrapper[4926]: I1122 10:55:50.373874 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/1c468f29-520a-4129-9ad6-8661f644a1a9-dns-svc\") pod \"dnsmasq-dns-7cb5889db5-cjghc\" (UID: \"1c468f29-520a-4129-9ad6-8661f644a1a9\") " pod="openstack/dnsmasq-dns-7cb5889db5-cjghc" Nov 22 10:55:50 crc kubenswrapper[4926]: I1122 10:55:50.373934 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1c468f29-520a-4129-9ad6-8661f644a1a9-config\") pod \"dnsmasq-dns-7cb5889db5-cjghc\" (UID: \"1c468f29-520a-4129-9ad6-8661f644a1a9\") " pod="openstack/dnsmasq-dns-7cb5889db5-cjghc" Nov 22 10:55:50 crc kubenswrapper[4926]: I1122 10:55:50.373956 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-d6w7z\" (UniqueName: \"kubernetes.io/projected/1c468f29-520a-4129-9ad6-8661f644a1a9-kube-api-access-d6w7z\") pod \"dnsmasq-dns-7cb5889db5-cjghc\" (UID: \"1c468f29-520a-4129-9ad6-8661f644a1a9\") " pod="openstack/dnsmasq-dns-7cb5889db5-cjghc" Nov 22 10:55:50 crc kubenswrapper[4926]: I1122 10:55:50.475581 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/1c468f29-520a-4129-9ad6-8661f644a1a9-dns-svc\") pod \"dnsmasq-dns-7cb5889db5-cjghc\" (UID: \"1c468f29-520a-4129-9ad6-8661f644a1a9\") " pod="openstack/dnsmasq-dns-7cb5889db5-cjghc" Nov 22 10:55:50 crc kubenswrapper[4926]: I1122 10:55:50.475631 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1c468f29-520a-4129-9ad6-8661f644a1a9-config\") pod \"dnsmasq-dns-7cb5889db5-cjghc\" (UID: \"1c468f29-520a-4129-9ad6-8661f644a1a9\") " pod="openstack/dnsmasq-dns-7cb5889db5-cjghc" Nov 22 10:55:50 crc kubenswrapper[4926]: I1122 10:55:50.475655 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-d6w7z\" (UniqueName: \"kubernetes.io/projected/1c468f29-520a-4129-9ad6-8661f644a1a9-kube-api-access-d6w7z\") pod \"dnsmasq-dns-7cb5889db5-cjghc\" (UID: \"1c468f29-520a-4129-9ad6-8661f644a1a9\") " pod="openstack/dnsmasq-dns-7cb5889db5-cjghc" Nov 22 10:55:50 crc kubenswrapper[4926]: I1122 10:55:50.476501 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/1c468f29-520a-4129-9ad6-8661f644a1a9-dns-svc\") pod \"dnsmasq-dns-7cb5889db5-cjghc\" (UID: 
\"1c468f29-520a-4129-9ad6-8661f644a1a9\") " pod="openstack/dnsmasq-dns-7cb5889db5-cjghc" Nov 22 10:55:50 crc kubenswrapper[4926]: I1122 10:55:50.476677 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1c468f29-520a-4129-9ad6-8661f644a1a9-config\") pod \"dnsmasq-dns-7cb5889db5-cjghc\" (UID: \"1c468f29-520a-4129-9ad6-8661f644a1a9\") " pod="openstack/dnsmasq-dns-7cb5889db5-cjghc" Nov 22 10:55:50 crc kubenswrapper[4926]: I1122 10:55:50.496298 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-d6w7z\" (UniqueName: \"kubernetes.io/projected/1c468f29-520a-4129-9ad6-8661f644a1a9-kube-api-access-d6w7z\") pod \"dnsmasq-dns-7cb5889db5-cjghc\" (UID: \"1c468f29-520a-4129-9ad6-8661f644a1a9\") " pod="openstack/dnsmasq-dns-7cb5889db5-cjghc" Nov 22 10:55:50 crc kubenswrapper[4926]: I1122 10:55:50.612069 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7cb5889db5-cjghc" Nov 22 10:55:50 crc kubenswrapper[4926]: I1122 10:55:50.707540 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-57d769cc4f-22bgs" Nov 22 10:55:50 crc kubenswrapper[4926]: I1122 10:55:50.780810 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/62b8478a-cfa9-4952-a402-bc004da81057-dns-svc\") pod \"62b8478a-cfa9-4952-a402-bc004da81057\" (UID: \"62b8478a-cfa9-4952-a402-bc004da81057\") " Nov 22 10:55:50 crc kubenswrapper[4926]: I1122 10:55:50.781217 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/62b8478a-cfa9-4952-a402-bc004da81057-config\") pod \"62b8478a-cfa9-4952-a402-bc004da81057\" (UID: \"62b8478a-cfa9-4952-a402-bc004da81057\") " Nov 22 10:55:50 crc kubenswrapper[4926]: I1122 10:55:50.781357 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nnf5l\" (UniqueName: \"kubernetes.io/projected/62b8478a-cfa9-4952-a402-bc004da81057-kube-api-access-nnf5l\") pod \"62b8478a-cfa9-4952-a402-bc004da81057\" (UID: \"62b8478a-cfa9-4952-a402-bc004da81057\") " Nov 22 10:55:50 crc kubenswrapper[4926]: I1122 10:55:50.784952 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/62b8478a-cfa9-4952-a402-bc004da81057-kube-api-access-nnf5l" (OuterVolumeSpecName: "kube-api-access-nnf5l") pod "62b8478a-cfa9-4952-a402-bc004da81057" (UID: "62b8478a-cfa9-4952-a402-bc004da81057"). InnerVolumeSpecName "kube-api-access-nnf5l". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:55:50 crc kubenswrapper[4926]: I1122 10:55:50.812436 4926 generic.go:334] "Generic (PLEG): container finished" podID="94fdd08c-2339-4d12-90bf-fbd407185f34" containerID="b6244eb35022859266cd27c848eadf207b9445b1a6b676bf31d28f8c2c7b0d9d" exitCode=0 Nov 22 10:55:50 crc kubenswrapper[4926]: I1122 10:55:50.812500 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"94fdd08c-2339-4d12-90bf-fbd407185f34","Type":"ContainerDied","Data":"b6244eb35022859266cd27c848eadf207b9445b1a6b676bf31d28f8c2c7b0d9d"} Nov 22 10:55:50 crc kubenswrapper[4926]: I1122 10:55:50.819049 4926 generic.go:334] "Generic (PLEG): container finished" podID="01026c46-6589-4761-80f4-8bb210d71fd9" containerID="1c9bb378529f326c403ce078f4a5734367b49619cd73ea73287bbcbcef50c156" exitCode=0 Nov 22 10:55:50 crc kubenswrapper[4926]: I1122 10:55:50.819110 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"01026c46-6589-4761-80f4-8bb210d71fd9","Type":"ContainerDied","Data":"1c9bb378529f326c403ce078f4a5734367b49619cd73ea73287bbcbcef50c156"} Nov 22 10:55:50 crc kubenswrapper[4926]: I1122 10:55:50.823043 4926 generic.go:334] "Generic (PLEG): container finished" podID="62b8478a-cfa9-4952-a402-bc004da81057" containerID="b60c12cb4b2554db3152f11ebc0424ace0ba1c0c50ccf31c17e3538094ac0b12" exitCode=0 Nov 22 10:55:50 crc kubenswrapper[4926]: I1122 10:55:50.823414 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57d769cc4f-22bgs" event={"ID":"62b8478a-cfa9-4952-a402-bc004da81057","Type":"ContainerDied","Data":"b60c12cb4b2554db3152f11ebc0424ace0ba1c0c50ccf31c17e3538094ac0b12"} Nov 22 10:55:50 crc kubenswrapper[4926]: I1122 10:55:50.823670 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57d769cc4f-22bgs" event={"ID":"62b8478a-cfa9-4952-a402-bc004da81057","Type":"ContainerDied","Data":"bbda5f4b00bbf6d4c16588325e213160685cd18a19c4a466a613ee726e43e4be"} Nov 22 10:55:50 crc kubenswrapper[4926]: I1122 10:55:50.823798 4926 scope.go:117] "RemoveContainer" containerID="b60c12cb4b2554db3152f11ebc0424ace0ba1c0c50ccf31c17e3538094ac0b12" Nov 22 10:55:50 crc kubenswrapper[4926]: I1122 10:55:50.824691 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-57d769cc4f-22bgs" Nov 22 10:55:50 crc kubenswrapper[4926]: I1122 10:55:50.825258 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-controller-ovs-hrnx7" Nov 22 10:55:50 crc kubenswrapper[4926]: I1122 10:55:50.881158 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/62b8478a-cfa9-4952-a402-bc004da81057-config" (OuterVolumeSpecName: "config") pod "62b8478a-cfa9-4952-a402-bc004da81057" (UID: "62b8478a-cfa9-4952-a402-bc004da81057"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:55:50 crc kubenswrapper[4926]: I1122 10:55:50.883848 4926 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/62b8478a-cfa9-4952-a402-bc004da81057-config\") on node \"crc\" DevicePath \"\"" Nov 22 10:55:50 crc kubenswrapper[4926]: I1122 10:55:50.883904 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nnf5l\" (UniqueName: \"kubernetes.io/projected/62b8478a-cfa9-4952-a402-bc004da81057-kube-api-access-nnf5l\") on node \"crc\" DevicePath \"\"" Nov 22 10:55:50 crc kubenswrapper[4926]: I1122 10:55:50.889348 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/62b8478a-cfa9-4952-a402-bc004da81057-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "62b8478a-cfa9-4952-a402-bc004da81057" (UID: "62b8478a-cfa9-4952-a402-bc004da81057"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:55:50 crc kubenswrapper[4926]: I1122 10:55:50.896795 4926 scope.go:117] "RemoveContainer" containerID="02733bc85223a7194e565076362b35053c23e642464f8274921db603b95a3dd9" Nov 22 10:55:50 crc kubenswrapper[4926]: I1122 10:55:50.930547 4926 scope.go:117] "RemoveContainer" containerID="b60c12cb4b2554db3152f11ebc0424ace0ba1c0c50ccf31c17e3538094ac0b12" Nov 22 10:55:50 crc kubenswrapper[4926]: E1122 10:55:50.931017 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b60c12cb4b2554db3152f11ebc0424ace0ba1c0c50ccf31c17e3538094ac0b12\": container with ID starting with b60c12cb4b2554db3152f11ebc0424ace0ba1c0c50ccf31c17e3538094ac0b12 not found: ID does not exist" containerID="b60c12cb4b2554db3152f11ebc0424ace0ba1c0c50ccf31c17e3538094ac0b12" Nov 22 10:55:50 crc kubenswrapper[4926]: I1122 10:55:50.931060 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b60c12cb4b2554db3152f11ebc0424ace0ba1c0c50ccf31c17e3538094ac0b12"} err="failed to get container status \"b60c12cb4b2554db3152f11ebc0424ace0ba1c0c50ccf31c17e3538094ac0b12\": rpc error: code = NotFound desc = could not find container \"b60c12cb4b2554db3152f11ebc0424ace0ba1c0c50ccf31c17e3538094ac0b12\": container with ID starting with b60c12cb4b2554db3152f11ebc0424ace0ba1c0c50ccf31c17e3538094ac0b12 not found: ID does not exist" Nov 22 10:55:50 crc kubenswrapper[4926]: I1122 10:55:50.931090 4926 scope.go:117] "RemoveContainer" containerID="02733bc85223a7194e565076362b35053c23e642464f8274921db603b95a3dd9" Nov 22 10:55:50 crc kubenswrapper[4926]: E1122 10:55:50.932745 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"02733bc85223a7194e565076362b35053c23e642464f8274921db603b95a3dd9\": container with ID starting with 02733bc85223a7194e565076362b35053c23e642464f8274921db603b95a3dd9 not found: ID does not exist" containerID="02733bc85223a7194e565076362b35053c23e642464f8274921db603b95a3dd9" Nov 22 10:55:50 crc kubenswrapper[4926]: I1122 10:55:50.932778 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"02733bc85223a7194e565076362b35053c23e642464f8274921db603b95a3dd9"} err="failed to get container status \"02733bc85223a7194e565076362b35053c23e642464f8274921db603b95a3dd9\": rpc error: code = NotFound desc = could not find container \"02733bc85223a7194e565076362b35053c23e642464f8274921db603b95a3dd9\": container with ID 
starting with 02733bc85223a7194e565076362b35053c23e642464f8274921db603b95a3dd9 not found: ID does not exist"
Nov 22 10:55:50 crc kubenswrapper[4926]: I1122 10:55:50.985156 4926 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/62b8478a-cfa9-4952-a402-bc004da81057-dns-svc\") on node \"crc\" DevicePath \"\""
Nov 22 10:55:51 crc kubenswrapper[4926]: I1122 10:55:51.029515 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-7cb5889db5-cjghc"]
Nov 22 10:55:51 crc kubenswrapper[4926]: W1122 10:55:51.034519 4926 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod1c468f29_520a_4129_9ad6_8661f644a1a9.slice/crio-45e065964487ac9dfacea74718161f45b2f764036d90d7694206560417938038 WatchSource:0}: Error finding container 45e065964487ac9dfacea74718161f45b2f764036d90d7694206560417938038: Status 404 returned error can't find the container with id 45e065964487ac9dfacea74718161f45b2f764036d90d7694206560417938038
Nov 22 10:55:51 crc kubenswrapper[4926]: I1122 10:55:51.155956 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-22bgs"]
Nov 22 10:55:51 crc kubenswrapper[4926]: I1122 10:55:51.160583 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-22bgs"]
Nov 22 10:55:51 crc kubenswrapper[4926]: I1122 10:55:51.369212 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/swift-storage-0"]
Nov 22 10:55:51 crc kubenswrapper[4926]: E1122 10:55:51.369605 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="62b8478a-cfa9-4952-a402-bc004da81057" containerName="init"
Nov 22 10:55:51 crc kubenswrapper[4926]: I1122 10:55:51.369625 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="62b8478a-cfa9-4952-a402-bc004da81057" containerName="init"
Nov 22 10:55:51 crc kubenswrapper[4926]: E1122 10:55:51.369662 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="62b8478a-cfa9-4952-a402-bc004da81057" containerName="dnsmasq-dns"
Nov 22 10:55:51 crc kubenswrapper[4926]: I1122 10:55:51.369671 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="62b8478a-cfa9-4952-a402-bc004da81057" containerName="dnsmasq-dns"
Nov 22 10:55:51 crc kubenswrapper[4926]: I1122 10:55:51.369876 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="62b8478a-cfa9-4952-a402-bc004da81057" containerName="dnsmasq-dns"
Nov 22 10:55:51 crc kubenswrapper[4926]: I1122 10:55:51.375504 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-storage-0"
Nov 22 10:55:51 crc kubenswrapper[4926]: I1122 10:55:51.377216 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-conf"
Nov 22 10:55:51 crc kubenswrapper[4926]: I1122 10:55:51.377329 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-swift-dockercfg-6566v"
Nov 22 10:55:51 crc kubenswrapper[4926]: I1122 10:55:51.377566 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-ring-files"
Nov 22 10:55:51 crc kubenswrapper[4926]: I1122 10:55:51.377708 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-storage-config-data"
Nov 22 10:55:51 crc kubenswrapper[4926]: I1122 10:55:51.399278 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-storage-0"]
Nov 22 10:55:51 crc kubenswrapper[4926]: I1122 10:55:51.491480 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/2d0b3ca4-531c-4c3e-9af7-f5d2b65c7251-lock\") pod \"swift-storage-0\" (UID: \"2d0b3ca4-531c-4c3e-9af7-f5d2b65c7251\") " pod="openstack/swift-storage-0"
Nov 22 10:55:51 crc kubenswrapper[4926]: I1122 10:55:51.491573 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/2d0b3ca4-531c-4c3e-9af7-f5d2b65c7251-cache\") pod \"swift-storage-0\" (UID: \"2d0b3ca4-531c-4c3e-9af7-f5d2b65c7251\") " pod="openstack/swift-storage-0"
Nov 22 10:55:51 crc kubenswrapper[4926]: I1122 10:55:51.491672 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m8s7q\" (UniqueName: \"kubernetes.io/projected/2d0b3ca4-531c-4c3e-9af7-f5d2b65c7251-kube-api-access-m8s7q\") pod \"swift-storage-0\" (UID: \"2d0b3ca4-531c-4c3e-9af7-f5d2b65c7251\") " pod="openstack/swift-storage-0"
Nov 22 10:55:51 crc kubenswrapper[4926]: I1122 10:55:51.491716 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/2d0b3ca4-531c-4c3e-9af7-f5d2b65c7251-etc-swift\") pod \"swift-storage-0\" (UID: \"2d0b3ca4-531c-4c3e-9af7-f5d2b65c7251\") " pod="openstack/swift-storage-0"
Nov 22 10:55:51 crc kubenswrapper[4926]: I1122 10:55:51.491739 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"swift-storage-0\" (UID: \"2d0b3ca4-531c-4c3e-9af7-f5d2b65c7251\") " pod="openstack/swift-storage-0"
Nov 22 10:55:51 crc kubenswrapper[4926]: I1122 10:55:51.593389 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/2d0b3ca4-531c-4c3e-9af7-f5d2b65c7251-cache\") pod \"swift-storage-0\" (UID: \"2d0b3ca4-531c-4c3e-9af7-f5d2b65c7251\") " pod="openstack/swift-storage-0"
Nov 22 10:55:51 crc kubenswrapper[4926]: I1122 10:55:51.593495 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-m8s7q\" (UniqueName: \"kubernetes.io/projected/2d0b3ca4-531c-4c3e-9af7-f5d2b65c7251-kube-api-access-m8s7q\") pod \"swift-storage-0\" (UID: \"2d0b3ca4-531c-4c3e-9af7-f5d2b65c7251\") " pod="openstack/swift-storage-0"
Nov 22 10:55:51 crc kubenswrapper[4926]: I1122 10:55:51.593540 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/2d0b3ca4-531c-4c3e-9af7-f5d2b65c7251-etc-swift\") pod \"swift-storage-0\" (UID: \"2d0b3ca4-531c-4c3e-9af7-f5d2b65c7251\") " pod="openstack/swift-storage-0"
Nov 22 10:55:51 crc kubenswrapper[4926]: I1122 10:55:51.593628 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"swift-storage-0\" (UID: \"2d0b3ca4-531c-4c3e-9af7-f5d2b65c7251\") " pod="openstack/swift-storage-0"
Nov 22 10:55:51 crc kubenswrapper[4926]: I1122 10:55:51.593667 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/2d0b3ca4-531c-4c3e-9af7-f5d2b65c7251-lock\") pod \"swift-storage-0\" (UID: \"2d0b3ca4-531c-4c3e-9af7-f5d2b65c7251\") " pod="openstack/swift-storage-0"
Nov 22 10:55:51 crc kubenswrapper[4926]: E1122 10:55:51.593749 4926 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found
Nov 22 10:55:51 crc kubenswrapper[4926]: E1122 10:55:51.593768 4926 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found
Nov 22 10:55:51 crc kubenswrapper[4926]: E1122 10:55:51.593812 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/2d0b3ca4-531c-4c3e-9af7-f5d2b65c7251-etc-swift podName:2d0b3ca4-531c-4c3e-9af7-f5d2b65c7251 nodeName:}" failed. No retries permitted until 2025-11-22 10:55:52.093796004 +0000 UTC m=+972.395401291 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/2d0b3ca4-531c-4c3e-9af7-f5d2b65c7251-etc-swift") pod "swift-storage-0" (UID: "2d0b3ca4-531c-4c3e-9af7-f5d2b65c7251") : configmap "swift-ring-files" not found
Nov 22 10:55:51 crc kubenswrapper[4926]: I1122 10:55:51.593980 4926 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"swift-storage-0\" (UID: \"2d0b3ca4-531c-4c3e-9af7-f5d2b65c7251\") device mount path \"/mnt/openstack/pv10\"" pod="openstack/swift-storage-0"
Nov 22 10:55:51 crc kubenswrapper[4926]: I1122 10:55:51.594147 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/2d0b3ca4-531c-4c3e-9af7-f5d2b65c7251-lock\") pod \"swift-storage-0\" (UID: \"2d0b3ca4-531c-4c3e-9af7-f5d2b65c7251\") " pod="openstack/swift-storage-0"
Nov 22 10:55:51 crc kubenswrapper[4926]: I1122 10:55:51.594177 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/2d0b3ca4-531c-4c3e-9af7-f5d2b65c7251-cache\") pod \"swift-storage-0\" (UID: \"2d0b3ca4-531c-4c3e-9af7-f5d2b65c7251\") " pod="openstack/swift-storage-0"
Nov 22 10:55:51 crc kubenswrapper[4926]: I1122 10:55:51.621245 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-m8s7q\" (UniqueName: \"kubernetes.io/projected/2d0b3ca4-531c-4c3e-9af7-f5d2b65c7251-kube-api-access-m8s7q\") pod \"swift-storage-0\" (UID: \"2d0b3ca4-531c-4c3e-9af7-f5d2b65c7251\") " pod="openstack/swift-storage-0"
Nov 22 10:55:51 crc kubenswrapper[4926]: I1122 10:55:51.633268 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"swift-storage-0\" (UID: \"2d0b3ca4-531c-4c3e-9af7-f5d2b65c7251\") " pod="openstack/swift-storage-0"
Nov 22 10:55:51 crc kubenswrapper[4926]: I1122 10:55:51.836294 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"01026c46-6589-4761-80f4-8bb210d71fd9","Type":"ContainerStarted","Data":"d50087b39c392ea02e30796f68f81512a69ac3029f23c094ddc071213496bb57"}
Nov 22 10:55:51 crc kubenswrapper[4926]: I1122 10:55:51.839383 4926 generic.go:334] "Generic (PLEG): container finished" podID="1c468f29-520a-4129-9ad6-8661f644a1a9" containerID="6df874767b0a39226d44aa1d0a18ab461578f58320d3ad6d28b573a67bac3cf2" exitCode=0
Nov 22 10:55:51 crc kubenswrapper[4926]: I1122 10:55:51.839445 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7cb5889db5-cjghc" event={"ID":"1c468f29-520a-4129-9ad6-8661f644a1a9","Type":"ContainerDied","Data":"6df874767b0a39226d44aa1d0a18ab461578f58320d3ad6d28b573a67bac3cf2"}
Nov 22 10:55:51 crc kubenswrapper[4926]: I1122 10:55:51.839466 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7cb5889db5-cjghc" event={"ID":"1c468f29-520a-4129-9ad6-8661f644a1a9","Type":"ContainerStarted","Data":"45e065964487ac9dfacea74718161f45b2f764036d90d7694206560417938038"}
Nov 22 10:55:51 crc kubenswrapper[4926]: I1122 10:55:51.844479 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"94fdd08c-2339-4d12-90bf-fbd407185f34","Type":"ContainerStarted","Data":"86a4ecfa63fb2c6a6de07b1574703b02394939c8d71b2c94032d0d37d5c3d217"}
Nov 22 10:55:51 crc kubenswrapper[4926]: I1122 10:55:51.844618 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/swift-ring-rebalance-bn9s6"]
Nov 22 10:55:51 crc kubenswrapper[4926]: I1122 10:55:51.846017 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-ring-rebalance-bn9s6"
Nov 22 10:55:51 crc kubenswrapper[4926]: I1122 10:55:51.848102 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-proxy-config-data"
Nov 22 10:55:51 crc kubenswrapper[4926]: I1122 10:55:51.849451 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-ring-config-data"
Nov 22 10:55:51 crc kubenswrapper[4926]: I1122 10:55:51.849623 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-ring-scripts"
Nov 22 10:55:51 crc kubenswrapper[4926]: I1122 10:55:51.865113 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-ring-rebalance-bn9s6"]
Nov 22 10:55:51 crc kubenswrapper[4926]: I1122 10:55:51.881043 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstack-cell1-galera-0" podStartSLOduration=18.467585142 podStartE2EDuration="25.881018773s" podCreationTimestamp="2025-11-22 10:55:26 +0000 UTC" firstStartedPulling="2025-11-22 10:55:37.514395002 +0000 UTC m=+957.816000289" lastFinishedPulling="2025-11-22 10:55:44.927828633 +0000 UTC m=+965.229433920" observedRunningTime="2025-11-22 10:55:51.859978128 +0000 UTC m=+972.161583435" watchObservedRunningTime="2025-11-22 10:55:51.881018773 +0000 UTC m=+972.182624060"
Nov 22 10:55:51 crc kubenswrapper[4926]: I1122 10:55:51.892507 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstack-galera-0" podStartSLOduration=21.427415224 podStartE2EDuration="26.892490303s" podCreationTimestamp="2025-11-22 10:55:25 +0000 UTC" firstStartedPulling="2025-11-22 10:55:39.465337388 +0000 UTC m=+959.766942675" lastFinishedPulling="2025-11-22 10:55:44.930412467 +0000 UTC m=+965.232017754" observedRunningTime="2025-11-22 10:55:51.8899344 +0000 UTC m=+972.191539687" watchObservedRunningTime="2025-11-22 10:55:51.892490303 +0000 UTC m=+972.194095590"
Nov 22 10:55:51 crc kubenswrapper[4926]: I1122 10:55:51.913141 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/ovsdbserver-sb-0"
Nov 22 10:55:51 crc kubenswrapper[4926]: I1122 10:55:51.913224 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovsdbserver-sb-0"
Nov 22 10:55:51 crc kubenswrapper[4926]: I1122 10:55:51.962430 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/ovsdbserver-sb-0"
Nov 22 10:55:51 crc kubenswrapper[4926]: I1122 10:55:51.999631 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/06d59088-e96c-45eb-aba8-00382ceaa48a-swiftconf\") pod \"swift-ring-rebalance-bn9s6\" (UID: \"06d59088-e96c-45eb-aba8-00382ceaa48a\") " pod="openstack/swift-ring-rebalance-bn9s6"
Nov 22 10:55:51 crc kubenswrapper[4926]: I1122 10:55:51.999708 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-drncd\" (UniqueName: \"kubernetes.io/projected/06d59088-e96c-45eb-aba8-00382ceaa48a-kube-api-access-drncd\") pod \"swift-ring-rebalance-bn9s6\" (UID: \"06d59088-e96c-45eb-aba8-00382ceaa48a\") " pod="openstack/swift-ring-rebalance-bn9s6"
Nov 22 10:55:51 crc kubenswrapper[4926]: I1122 10:55:51.999748 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/06d59088-e96c-45eb-aba8-00382ceaa48a-etc-swift\") pod \"swift-ring-rebalance-bn9s6\" (UID: \"06d59088-e96c-45eb-aba8-00382ceaa48a\") " pod="openstack/swift-ring-rebalance-bn9s6"
Nov 22 10:55:51 crc kubenswrapper[4926]: I1122 10:55:51.999915 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/06d59088-e96c-45eb-aba8-00382ceaa48a-combined-ca-bundle\") pod \"swift-ring-rebalance-bn9s6\" (UID: \"06d59088-e96c-45eb-aba8-00382ceaa48a\") " pod="openstack/swift-ring-rebalance-bn9s6"
Nov 22 10:55:51 crc kubenswrapper[4926]: I1122 10:55:51.999968 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/06d59088-e96c-45eb-aba8-00382ceaa48a-dispersionconf\") pod \"swift-ring-rebalance-bn9s6\" (UID: \"06d59088-e96c-45eb-aba8-00382ceaa48a\") " pod="openstack/swift-ring-rebalance-bn9s6"
Nov 22 10:55:52 crc kubenswrapper[4926]: I1122 10:55:52.000195 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/06d59088-e96c-45eb-aba8-00382ceaa48a-ring-data-devices\") pod \"swift-ring-rebalance-bn9s6\" (UID: \"06d59088-e96c-45eb-aba8-00382ceaa48a\") " pod="openstack/swift-ring-rebalance-bn9s6"
Nov 22 10:55:52 crc kubenswrapper[4926]: I1122 10:55:52.000264 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/06d59088-e96c-45eb-aba8-00382ceaa48a-scripts\") pod \"swift-ring-rebalance-bn9s6\" (UID: \"06d59088-e96c-45eb-aba8-00382ceaa48a\") " pod="openstack/swift-ring-rebalance-bn9s6"
Nov 22 10:55:52 crc kubenswrapper[4926]: I1122 10:55:52.101780 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/06d59088-e96c-45eb-aba8-00382ceaa48a-combined-ca-bundle\") pod \"swift-ring-rebalance-bn9s6\" (UID: \"06d59088-e96c-45eb-aba8-00382ceaa48a\") " pod="openstack/swift-ring-rebalance-bn9s6"
Nov 22 10:55:52 crc kubenswrapper[4926]: I1122 10:55:52.101862 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/2d0b3ca4-531c-4c3e-9af7-f5d2b65c7251-etc-swift\") pod \"swift-storage-0\" (UID: \"2d0b3ca4-531c-4c3e-9af7-f5d2b65c7251\") " pod="openstack/swift-storage-0"
Nov 22 10:55:52 crc kubenswrapper[4926]: I1122 10:55:52.101930 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/06d59088-e96c-45eb-aba8-00382ceaa48a-dispersionconf\") pod \"swift-ring-rebalance-bn9s6\" (UID: \"06d59088-e96c-45eb-aba8-00382ceaa48a\") " pod="openstack/swift-ring-rebalance-bn9s6"
Nov 22 10:55:52 crc kubenswrapper[4926]: I1122 10:55:52.102027 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/06d59088-e96c-45eb-aba8-00382ceaa48a-ring-data-devices\") pod \"swift-ring-rebalance-bn9s6\" (UID: \"06d59088-e96c-45eb-aba8-00382ceaa48a\") " pod="openstack/swift-ring-rebalance-bn9s6"
Nov 22 10:55:52 crc kubenswrapper[4926]: E1122 10:55:52.102048 4926 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found
Nov 22 10:55:52 crc kubenswrapper[4926]: E1122 10:55:52.102067 4926 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found
Nov 22 10:55:52 crc kubenswrapper[4926]: I1122 10:55:52.102082 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/06d59088-e96c-45eb-aba8-00382ceaa48a-scripts\") pod \"swift-ring-rebalance-bn9s6\" (UID: \"06d59088-e96c-45eb-aba8-00382ceaa48a\") " pod="openstack/swift-ring-rebalance-bn9s6"
Nov 22 10:55:52 crc kubenswrapper[4926]: E1122 10:55:52.102115 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/2d0b3ca4-531c-4c3e-9af7-f5d2b65c7251-etc-swift podName:2d0b3ca4-531c-4c3e-9af7-f5d2b65c7251 nodeName:}" failed. No retries permitted until 2025-11-22 10:55:53.102099052 +0000 UTC m=+973.403704339 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/2d0b3ca4-531c-4c3e-9af7-f5d2b65c7251-etc-swift") pod "swift-storage-0" (UID: "2d0b3ca4-531c-4c3e-9af7-f5d2b65c7251") : configmap "swift-ring-files" not found
Nov 22 10:55:52 crc kubenswrapper[4926]: I1122 10:55:52.102137 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/06d59088-e96c-45eb-aba8-00382ceaa48a-swiftconf\") pod \"swift-ring-rebalance-bn9s6\" (UID: \"06d59088-e96c-45eb-aba8-00382ceaa48a\") " pod="openstack/swift-ring-rebalance-bn9s6"
Nov 22 10:55:52 crc kubenswrapper[4926]: I1122 10:55:52.102194 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-drncd\" (UniqueName: \"kubernetes.io/projected/06d59088-e96c-45eb-aba8-00382ceaa48a-kube-api-access-drncd\") pod \"swift-ring-rebalance-bn9s6\" (UID: \"06d59088-e96c-45eb-aba8-00382ceaa48a\") " pod="openstack/swift-ring-rebalance-bn9s6"
Nov 22 10:55:52 crc kubenswrapper[4926]: I1122 10:55:52.102239 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/06d59088-e96c-45eb-aba8-00382ceaa48a-etc-swift\") pod \"swift-ring-rebalance-bn9s6\" (UID: \"06d59088-e96c-45eb-aba8-00382ceaa48a\") " pod="openstack/swift-ring-rebalance-bn9s6"
Nov 22 10:55:52 crc kubenswrapper[4926]: I1122 10:55:52.102864 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/06d59088-e96c-45eb-aba8-00382ceaa48a-etc-swift\") pod \"swift-ring-rebalance-bn9s6\" (UID: \"06d59088-e96c-45eb-aba8-00382ceaa48a\") " pod="openstack/swift-ring-rebalance-bn9s6"
Nov 22 10:55:52 crc kubenswrapper[4926]: I1122 10:55:52.105375 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/06d59088-e96c-45eb-aba8-00382ceaa48a-scripts\") pod \"swift-ring-rebalance-bn9s6\" (UID: \"06d59088-e96c-45eb-aba8-00382ceaa48a\") " pod="openstack/swift-ring-rebalance-bn9s6"
Nov 22 10:55:52 crc kubenswrapper[4926]: I1122 10:55:52.106471 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/06d59088-e96c-45eb-aba8-00382ceaa48a-ring-data-devices\") pod \"swift-ring-rebalance-bn9s6\" (UID: \"06d59088-e96c-45eb-aba8-00382ceaa48a\") " pod="openstack/swift-ring-rebalance-bn9s6"
Nov 22 10:55:52 crc kubenswrapper[4926]: I1122 10:55:52.107191 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/06d59088-e96c-45eb-aba8-00382ceaa48a-combined-ca-bundle\") pod \"swift-ring-rebalance-bn9s6\" (UID: \"06d59088-e96c-45eb-aba8-00382ceaa48a\") " pod="openstack/swift-ring-rebalance-bn9s6"
Nov 22 10:55:52 crc kubenswrapper[4926]: I1122 10:55:52.109690 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/06d59088-e96c-45eb-aba8-00382ceaa48a-dispersionconf\") pod \"swift-ring-rebalance-bn9s6\" (UID: \"06d59088-e96c-45eb-aba8-00382ceaa48a\") " pod="openstack/swift-ring-rebalance-bn9s6"
Nov 22 10:55:52 crc kubenswrapper[4926]: I1122 10:55:52.109827 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/06d59088-e96c-45eb-aba8-00382ceaa48a-swiftconf\") pod \"swift-ring-rebalance-bn9s6\" (UID: \"06d59088-e96c-45eb-aba8-00382ceaa48a\") " pod="openstack/swift-ring-rebalance-bn9s6"
Nov 22 10:55:52 crc kubenswrapper[4926]: I1122 10:55:52.120602 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-drncd\" (UniqueName: \"kubernetes.io/projected/06d59088-e96c-45eb-aba8-00382ceaa48a-kube-api-access-drncd\") pod \"swift-ring-rebalance-bn9s6\" (UID: \"06d59088-e96c-45eb-aba8-00382ceaa48a\") " pod="openstack/swift-ring-rebalance-bn9s6"
Nov 22 10:55:52 crc kubenswrapper[4926]: I1122 10:55:52.147820 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/ovsdbserver-nb-0"
Nov 22 10:55:52 crc kubenswrapper[4926]: I1122 10:55:52.184854 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/ovsdbserver-nb-0"
Nov 22 10:55:52 crc kubenswrapper[4926]: I1122 10:55:52.242261 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-ring-rebalance-bn9s6"
Nov 22 10:55:52 crc kubenswrapper[4926]: I1122 10:55:52.592130 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="62b8478a-cfa9-4952-a402-bc004da81057" path="/var/lib/kubelet/pods/62b8478a-cfa9-4952-a402-bc004da81057/volumes"
Nov 22 10:55:52 crc kubenswrapper[4926]: I1122 10:55:52.718023 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-ring-rebalance-bn9s6"]
Nov 22 10:55:52 crc kubenswrapper[4926]: I1122 10:55:52.854919 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-bn9s6" event={"ID":"06d59088-e96c-45eb-aba8-00382ceaa48a","Type":"ContainerStarted","Data":"8840ebd28b3069fbf03b1c2dd52264933b5c9c7cb3e272d2c65771e4cd8ca11c"}
Nov 22 10:55:52 crc kubenswrapper[4926]: I1122 10:55:52.860105 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7cb5889db5-cjghc" event={"ID":"1c468f29-520a-4129-9ad6-8661f644a1a9","Type":"ContainerStarted","Data":"0d2ce5c3188f6df595356a6d891ddf9aefd529a612d60d295c2f92a33aac7702"}
Nov 22 10:55:52 crc kubenswrapper[4926]: I1122 10:55:52.860151 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovsdbserver-nb-0"
Nov 22 10:55:52 crc kubenswrapper[4926]: I1122 10:55:52.881982 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-7cb5889db5-cjghc" podStartSLOduration=2.88195581 podStartE2EDuration="2.88195581s" podCreationTimestamp="2025-11-22 10:55:50 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 10:55:52.875932157 +0000 UTC m=+973.177537444" watchObservedRunningTime="2025-11-22 10:55:52.88195581 +0000 UTC m=+973.183561137"
Nov 22 10:55:52 crc kubenswrapper[4926]: I1122 10:55:52.902187 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovsdbserver-sb-0"
Nov 22 10:55:52 crc kubenswrapper[4926]: I1122 10:55:52.908202 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovsdbserver-nb-0"
Nov 22 10:55:53 crc kubenswrapper[4926]: I1122 10:55:53.116600 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/2d0b3ca4-531c-4c3e-9af7-f5d2b65c7251-etc-swift\") pod \"swift-storage-0\" (UID: \"2d0b3ca4-531c-4c3e-9af7-f5d2b65c7251\") " pod="openstack/swift-storage-0"
Nov 22 10:55:53 crc kubenswrapper[4926]: E1122 10:55:53.116762 4926 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found
Nov 22 10:55:53 crc kubenswrapper[4926]: E1122 10:55:53.116777 4926 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found
Nov 22 10:55:53 crc kubenswrapper[4926]: E1122 10:55:53.116825 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/2d0b3ca4-531c-4c3e-9af7-f5d2b65c7251-etc-swift podName:2d0b3ca4-531c-4c3e-9af7-f5d2b65c7251 nodeName:}" failed. No retries permitted until 2025-11-22 10:55:55.116811575 +0000 UTC m=+975.418416862 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/2d0b3ca4-531c-4c3e-9af7-f5d2b65c7251-etc-swift") pod "swift-storage-0" (UID: "2d0b3ca4-531c-4c3e-9af7-f5d2b65c7251") : configmap "swift-ring-files" not found
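[Editor's note] Note the durationBeforeRetry values across the repeated etc-swift failures: 500ms, then 1s, then 2s (and 4s further down). That is per-volume exponential backoff on the kubelet side. A minimal sketch of the doubling behavior is below; it illustrates the pattern visible in the log and is not the kubelet's actual nestedpendingoperations code, and the 2m2s cap is an assumption made for the sketch.

```go
package main

import (
	"fmt"
	"time"
)

// backoff doubles the wait after every consecutive failure, starting at
// 500ms (the first durationBeforeRetry seen in the log) up to a cap.
type backoff struct {
	delay time.Duration
}

func (b *backoff) next() time.Duration {
	if b.delay == 0 {
		b.delay = 500 * time.Millisecond
		return b.delay
	}
	b.delay *= 2
	if max := 2*time.Minute + 2*time.Second; b.delay > max { // assumed cap
		b.delay = max
	}
	return b.delay
}

func main() {
	var b backoff
	for i := 0; i < 4; i++ {
		fmt.Println(b.next()) // prints 500ms, 1s, 2s, 4s
	}
}
```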
Nov 22 10:55:53 crc kubenswrapper[4926]: I1122 10:55:53.197635 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5ccc8479f9-s7wk2"]
Nov 22 10:55:53 crc kubenswrapper[4926]: I1122 10:55:53.197929 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-5ccc8479f9-s7wk2" podUID="99e8179f-b7fe-402c-bbb6-fcb8b0ab1abb" containerName="dnsmasq-dns" containerID="cri-o://65b6e30d2e74694d237d419ae574205499026b6ae9140631d58ae2b809f03378" gracePeriod=10
Nov 22 10:55:53 crc kubenswrapper[4926]: I1122 10:55:53.199954 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-5ccc8479f9-s7wk2"
Nov 22 10:55:53 crc kubenswrapper[4926]: I1122 10:55:53.243481 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-8cc7fc4dc-82m4x"]
Nov 22 10:55:53 crc kubenswrapper[4926]: I1122 10:55:53.245029 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-8cc7fc4dc-82m4x"
Nov 22 10:55:53 crc kubenswrapper[4926]: W1122 10:55:53.247856 4926 reflector.go:561] object-"openstack"/"ovsdbserver-sb": failed to list *v1.ConfigMap: configmaps "ovsdbserver-sb" is forbidden: User "system:node:crc" cannot list resource "configmaps" in API group "" in the namespace "openstack": no relationship found between node 'crc' and this object
Nov 22 10:55:53 crc kubenswrapper[4926]: E1122 10:55:53.247925 4926 reflector.go:158] "Unhandled Error" err="object-\"openstack\"/\"ovsdbserver-sb\": Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps \"ovsdbserver-sb\" is forbidden: User \"system:node:crc\" cannot list resource \"configmaps\" in API group \"\" in the namespace \"openstack\": no relationship found between node 'crc' and this object" logger="UnhandledError"
Nov 22 10:55:53 crc kubenswrapper[4926]: I1122 10:55:53.285303 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-8cc7fc4dc-82m4x"]
Nov 22 10:55:53 crc kubenswrapper[4926]: I1122 10:55:53.320039 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/935dca86-6400-44ad-9976-770f56ea6a6b-config\") pod \"dnsmasq-dns-8cc7fc4dc-82m4x\" (UID: \"935dca86-6400-44ad-9976-770f56ea6a6b\") " pod="openstack/dnsmasq-dns-8cc7fc4dc-82m4x"
Nov 22 10:55:53 crc kubenswrapper[4926]: I1122 10:55:53.320102 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/935dca86-6400-44ad-9976-770f56ea6a6b-ovsdbserver-sb\") pod \"dnsmasq-dns-8cc7fc4dc-82m4x\" (UID: \"935dca86-6400-44ad-9976-770f56ea6a6b\") " pod="openstack/dnsmasq-dns-8cc7fc4dc-82m4x"
Nov 22 10:55:53 crc kubenswrapper[4926]: I1122 10:55:53.320151 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-h4v7s\" (UniqueName: \"kubernetes.io/projected/935dca86-6400-44ad-9976-770f56ea6a6b-kube-api-access-h4v7s\") pod \"dnsmasq-dns-8cc7fc4dc-82m4x\" (UID: \"935dca86-6400-44ad-9976-770f56ea6a6b\") " pod="openstack/dnsmasq-dns-8cc7fc4dc-82m4x"
Nov 22 10:55:53 crc kubenswrapper[4926]: I1122 10:55:53.320228 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/935dca86-6400-44ad-9976-770f56ea6a6b-dns-svc\") pod \"dnsmasq-dns-8cc7fc4dc-82m4x\" (UID: \"935dca86-6400-44ad-9976-770f56ea6a6b\") " pod="openstack/dnsmasq-dns-8cc7fc4dc-82m4x"
Nov 22 10:55:53 crc kubenswrapper[4926]: I1122 10:55:53.342219 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-metrics-fvp8n"]
Nov 22 10:55:53 crc kubenswrapper[4926]: I1122 10:55:53.343457 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-metrics-fvp8n"
Nov 22 10:55:53 crc kubenswrapper[4926]: I1122 10:55:53.347221 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-metrics-config"
Nov 22 10:55:53 crc kubenswrapper[4926]: I1122 10:55:53.403590 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-metrics-fvp8n"]
Nov 22 10:55:53 crc kubenswrapper[4926]: I1122 10:55:53.421833 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d2154a83-1eaa-44bc-ade8-754245e919b2-combined-ca-bundle\") pod \"ovn-controller-metrics-fvp8n\" (UID: \"d2154a83-1eaa-44bc-ade8-754245e919b2\") " pod="openstack/ovn-controller-metrics-fvp8n"
Nov 22 10:55:53 crc kubenswrapper[4926]: I1122 10:55:53.421902 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-prskr\" (UniqueName: \"kubernetes.io/projected/d2154a83-1eaa-44bc-ade8-754245e919b2-kube-api-access-prskr\") pod \"ovn-controller-metrics-fvp8n\" (UID: \"d2154a83-1eaa-44bc-ade8-754245e919b2\") " pod="openstack/ovn-controller-metrics-fvp8n"
Nov 22 10:55:53 crc kubenswrapper[4926]: I1122 10:55:53.421932 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d2154a83-1eaa-44bc-ade8-754245e919b2-config\") pod \"ovn-controller-metrics-fvp8n\" (UID: \"d2154a83-1eaa-44bc-ade8-754245e919b2\") " pod="openstack/ovn-controller-metrics-fvp8n"
Nov 22 10:55:53 crc kubenswrapper[4926]: I1122 10:55:53.421982 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/d2154a83-1eaa-44bc-ade8-754245e919b2-ovn-rundir\") pod \"ovn-controller-metrics-fvp8n\" (UID: \"d2154a83-1eaa-44bc-ade8-754245e919b2\") " pod="openstack/ovn-controller-metrics-fvp8n"
Nov 22 10:55:53 crc kubenswrapper[4926]: I1122 10:55:53.422029 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/935dca86-6400-44ad-9976-770f56ea6a6b-config\") pod \"dnsmasq-dns-8cc7fc4dc-82m4x\" (UID: \"935dca86-6400-44ad-9976-770f56ea6a6b\") " pod="openstack/dnsmasq-dns-8cc7fc4dc-82m4x"
Nov 22 10:55:53 crc kubenswrapper[4926]: I1122 10:55:53.422061 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/935dca86-6400-44ad-9976-770f56ea6a6b-ovsdbserver-sb\") pod \"dnsmasq-dns-8cc7fc4dc-82m4x\" (UID: \"935dca86-6400-44ad-9976-770f56ea6a6b\") " pod="openstack/dnsmasq-dns-8cc7fc4dc-82m4x"
Nov 22 10:55:53 crc kubenswrapper[4926]: I1122 10:55:53.422086 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/d2154a83-1eaa-44bc-ade8-754245e919b2-metrics-certs-tls-certs\") pod \"ovn-controller-metrics-fvp8n\" (UID: \"d2154a83-1eaa-44bc-ade8-754245e919b2\") " pod="openstack/ovn-controller-metrics-fvp8n"
Nov 22 10:55:53 crc kubenswrapper[4926]: I1122 10:55:53.422115 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-h4v7s\" (UniqueName: \"kubernetes.io/projected/935dca86-6400-44ad-9976-770f56ea6a6b-kube-api-access-h4v7s\") pod \"dnsmasq-dns-8cc7fc4dc-82m4x\" (UID: \"935dca86-6400-44ad-9976-770f56ea6a6b\") " pod="openstack/dnsmasq-dns-8cc7fc4dc-82m4x"
Nov 22 10:55:53 crc kubenswrapper[4926]: I1122 10:55:53.422155 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/d2154a83-1eaa-44bc-ade8-754245e919b2-ovs-rundir\") pod \"ovn-controller-metrics-fvp8n\" (UID: \"d2154a83-1eaa-44bc-ade8-754245e919b2\") " pod="openstack/ovn-controller-metrics-fvp8n"
Nov 22 10:55:53 crc kubenswrapper[4926]: I1122 10:55:53.422201 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/935dca86-6400-44ad-9976-770f56ea6a6b-dns-svc\") pod \"dnsmasq-dns-8cc7fc4dc-82m4x\" (UID: \"935dca86-6400-44ad-9976-770f56ea6a6b\") " pod="openstack/dnsmasq-dns-8cc7fc4dc-82m4x"
Nov 22 10:55:53 crc kubenswrapper[4926]: I1122 10:55:53.423147 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/935dca86-6400-44ad-9976-770f56ea6a6b-dns-svc\") pod \"dnsmasq-dns-8cc7fc4dc-82m4x\" (UID: \"935dca86-6400-44ad-9976-770f56ea6a6b\") " pod="openstack/dnsmasq-dns-8cc7fc4dc-82m4x"
Nov 22 10:55:53 crc kubenswrapper[4926]: I1122 10:55:53.423798 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/935dca86-6400-44ad-9976-770f56ea6a6b-config\") pod \"dnsmasq-dns-8cc7fc4dc-82m4x\" (UID: \"935dca86-6400-44ad-9976-770f56ea6a6b\") " pod="openstack/dnsmasq-dns-8cc7fc4dc-82m4x"
Nov 22 10:55:53 crc kubenswrapper[4926]: I1122 10:55:53.428012 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-5ccc8479f9-s7wk2" podUID="99e8179f-b7fe-402c-bbb6-fcb8b0ab1abb" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.98:5353: connect: connection refused"
Nov 22 10:55:53 crc kubenswrapper[4926]: I1122 10:55:53.443584 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-northd-0"]
Nov 22 10:55:53 crc kubenswrapper[4926]: I1122 10:55:53.464665 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-northd-0"]
Nov 22 10:55:53 crc kubenswrapper[4926]: I1122 10:55:53.464727 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-northd-0"
Nov 22 10:55:53 crc kubenswrapper[4926]: I1122 10:55:53.473607 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovnnorthd-ovndbs"
Nov 22 10:55:53 crc kubenswrapper[4926]: I1122 10:55:53.473694 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovnnorthd-ovnnorthd-dockercfg-fn9g6"
Nov 22 10:55:53 crc kubenswrapper[4926]: I1122 10:55:53.473935 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovnnorthd-scripts"
Nov 22 10:55:53 crc kubenswrapper[4926]: I1122 10:55:53.473951 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovnnorthd-config"
Nov 22 10:55:53 crc kubenswrapper[4926]: I1122 10:55:53.477776 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-h4v7s\" (UniqueName: \"kubernetes.io/projected/935dca86-6400-44ad-9976-770f56ea6a6b-kube-api-access-h4v7s\") pod \"dnsmasq-dns-8cc7fc4dc-82m4x\" (UID: \"935dca86-6400-44ad-9976-770f56ea6a6b\") " pod="openstack/dnsmasq-dns-8cc7fc4dc-82m4x"
Nov 22 10:55:53 crc kubenswrapper[4926]: I1122 10:55:53.523570 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/d2154a83-1eaa-44bc-ade8-754245e919b2-metrics-certs-tls-certs\") pod \"ovn-controller-metrics-fvp8n\" (UID: \"d2154a83-1eaa-44bc-ade8-754245e919b2\") " pod="openstack/ovn-controller-metrics-fvp8n"
Nov 22 10:55:53 crc kubenswrapper[4926]: I1122 10:55:53.523640 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/d2154a83-1eaa-44bc-ade8-754245e919b2-ovs-rundir\") pod \"ovn-controller-metrics-fvp8n\" (UID: \"d2154a83-1eaa-44bc-ade8-754245e919b2\") " pod="openstack/ovn-controller-metrics-fvp8n"
Nov 22 10:55:53 crc kubenswrapper[4926]: I1122 10:55:53.523706 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d2154a83-1eaa-44bc-ade8-754245e919b2-combined-ca-bundle\") pod \"ovn-controller-metrics-fvp8n\" (UID: \"d2154a83-1eaa-44bc-ade8-754245e919b2\") " pod="openstack/ovn-controller-metrics-fvp8n"
Nov 22 10:55:53 crc kubenswrapper[4926]: I1122 10:55:53.523724 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-prskr\" (UniqueName: \"kubernetes.io/projected/d2154a83-1eaa-44bc-ade8-754245e919b2-kube-api-access-prskr\") pod \"ovn-controller-metrics-fvp8n\" (UID: \"d2154a83-1eaa-44bc-ade8-754245e919b2\") " pod="openstack/ovn-controller-metrics-fvp8n"
Nov 22 10:55:53 crc kubenswrapper[4926]: I1122 10:55:53.523746 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d2154a83-1eaa-44bc-ade8-754245e919b2-config\") pod \"ovn-controller-metrics-fvp8n\" (UID: \"d2154a83-1eaa-44bc-ade8-754245e919b2\") " pod="openstack/ovn-controller-metrics-fvp8n"
Nov 22 10:55:53 crc kubenswrapper[4926]: I1122 10:55:53.523779 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/d2154a83-1eaa-44bc-ade8-754245e919b2-ovn-rundir\") pod \"ovn-controller-metrics-fvp8n\" (UID: \"d2154a83-1eaa-44bc-ade8-754245e919b2\") " pod="openstack/ovn-controller-metrics-fvp8n"
Nov 22 10:55:53 crc kubenswrapper[4926]: I1122 10:55:53.524130 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/d2154a83-1eaa-44bc-ade8-754245e919b2-ovn-rundir\") pod \"ovn-controller-metrics-fvp8n\" (UID: \"d2154a83-1eaa-44bc-ade8-754245e919b2\") " pod="openstack/ovn-controller-metrics-fvp8n"
Nov 22 10:55:53 crc kubenswrapper[4926]: I1122 10:55:53.524411 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/d2154a83-1eaa-44bc-ade8-754245e919b2-ovs-rundir\") pod \"ovn-controller-metrics-fvp8n\" (UID: \"d2154a83-1eaa-44bc-ade8-754245e919b2\") " pod="openstack/ovn-controller-metrics-fvp8n"
Nov 22 10:55:53 crc kubenswrapper[4926]: I1122 10:55:53.524977 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d2154a83-1eaa-44bc-ade8-754245e919b2-config\") pod \"ovn-controller-metrics-fvp8n\" (UID: \"d2154a83-1eaa-44bc-ade8-754245e919b2\") " pod="openstack/ovn-controller-metrics-fvp8n"
Nov 22 10:55:53 crc kubenswrapper[4926]: I1122 10:55:53.528004 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-7cb5889db5-cjghc"]
Nov 22 10:55:53 crc kubenswrapper[4926]: I1122 10:55:53.540171 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/d2154a83-1eaa-44bc-ade8-754245e919b2-metrics-certs-tls-certs\") pod \"ovn-controller-metrics-fvp8n\" (UID: \"d2154a83-1eaa-44bc-ade8-754245e919b2\") " pod="openstack/ovn-controller-metrics-fvp8n"
Nov 22 10:55:53 crc kubenswrapper[4926]: I1122 10:55:53.540550 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d2154a83-1eaa-44bc-ade8-754245e919b2-combined-ca-bundle\") pod \"ovn-controller-metrics-fvp8n\" (UID: \"d2154a83-1eaa-44bc-ade8-754245e919b2\") " pod="openstack/ovn-controller-metrics-fvp8n"
Nov 22 10:55:53 crc kubenswrapper[4926]: I1122 10:55:53.595512 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-prskr\" (UniqueName: \"kubernetes.io/projected/d2154a83-1eaa-44bc-ade8-754245e919b2-kube-api-access-prskr\") pod \"ovn-controller-metrics-fvp8n\" (UID: \"d2154a83-1eaa-44bc-ade8-754245e919b2\") " pod="openstack/ovn-controller-metrics-fvp8n"
Nov 22 10:55:53 crc kubenswrapper[4926]: I1122 10:55:53.615680 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-b8fbc5445-m7sk6"]
Nov 22 10:55:53 crc kubenswrapper[4926]: I1122 10:55:53.621153 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-b8fbc5445-m7sk6"
Nov 22 10:55:53 crc kubenswrapper[4926]: I1122 10:55:53.624461 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovsdbserver-nb"
Nov 22 10:55:53 crc kubenswrapper[4926]: I1122 10:55:53.624908 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/d90df493-f9a0-4774-bd2e-6b96bbfebf31-metrics-certs-tls-certs\") pod \"ovn-northd-0\" (UID: \"d90df493-f9a0-4774-bd2e-6b96bbfebf31\") " pod="openstack/ovn-northd-0"
Nov 22 10:55:53 crc kubenswrapper[4926]: I1122 10:55:53.624967 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d90df493-f9a0-4774-bd2e-6b96bbfebf31-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: \"d90df493-f9a0-4774-bd2e-6b96bbfebf31\") " pod="openstack/ovn-northd-0"
Nov 22 10:55:53 crc kubenswrapper[4926]: I1122 10:55:53.625000 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/d90df493-f9a0-4774-bd2e-6b96bbfebf31-scripts\") pod \"ovn-northd-0\" (UID: \"d90df493-f9a0-4774-bd2e-6b96bbfebf31\") " pod="openstack/ovn-northd-0"
Nov 22 10:55:53 crc kubenswrapper[4926]: I1122 10:55:53.625023 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/d90df493-f9a0-4774-bd2e-6b96bbfebf31-ovn-rundir\") pod \"ovn-northd-0\" (UID: \"d90df493-f9a0-4774-bd2e-6b96bbfebf31\") " pod="openstack/ovn-northd-0"
Nov 22 10:55:53 crc kubenswrapper[4926]: I1122 10:55:53.625068 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/d90df493-f9a0-4774-bd2e-6b96bbfebf31-ovn-northd-tls-certs\") pod \"ovn-northd-0\" (UID: \"d90df493-f9a0-4774-bd2e-6b96bbfebf31\") " pod="openstack/ovn-northd-0"
Nov 22 10:55:53 crc kubenswrapper[4926]: I1122 10:55:53.625157 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7nx6w\" (UniqueName: \"kubernetes.io/projected/d90df493-f9a0-4774-bd2e-6b96bbfebf31-kube-api-access-7nx6w\") pod \"ovn-northd-0\" (UID: \"d90df493-f9a0-4774-bd2e-6b96bbfebf31\") " pod="openstack/ovn-northd-0"
Nov 22 10:55:53 crc kubenswrapper[4926]: I1122 10:55:53.625219 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d90df493-f9a0-4774-bd2e-6b96bbfebf31-config\") pod \"ovn-northd-0\" (UID: \"d90df493-f9a0-4774-bd2e-6b96bbfebf31\") " pod="openstack/ovn-northd-0"
Nov 22 10:55:53 crc kubenswrapper[4926]: I1122 10:55:53.639500 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-b8fbc5445-m7sk6"]
Nov 22 10:55:53 crc kubenswrapper[4926]: I1122 10:55:53.713103 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-metrics-fvp8n"
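[Editor's note] The earlier reflector warning ("configmaps \"ovsdbserver-sb\" is forbidden ... no relationship found between node 'crc' and this object") is the node authorizer at work: a kubelet may only read a ConfigMap once a pod scheduled to its node actually references it, and the pod-to-node relationship had not propagated yet. The sketch below shows the kind of read the kubelet's reflector performs, using standard client-go calls; the out-of-cluster kubeconfig loading is an assumption for the sketch, not how the kubelet authenticates.

```go
package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Assumption for the sketch: load credentials from ~/.kube/config.
	cfg, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		panic(err)
	}
	cs, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}
	// With node credentials this fails with the "forbidden ... no
	// relationship found" error from the log until a pod on the node
	// references the ConfigMap; it succeeds once the binding is known.
	cm, err := cs.CoreV1().ConfigMaps("openstack").Get(context.TODO(), "ovsdbserver-sb", metav1.GetOptions{})
	if err != nil {
		fmt.Println("get failed:", err)
		return
	}
	fmt.Println("got configmap:", cm.Name)
}
```

Consistent with that, the log later shows "Caches populated for *v1.ConfigMap from object-\"openstack\"/\"ovsdbserver-sb\"" once the relationship resolves, and the pending ovsdbserver-sb mount then succeeds.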
Nov 22 10:55:53 crc kubenswrapper[4926]: I1122 10:55:53.727822 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/37642f49-f656-4aeb-a359-2c32f4cf7919-config\") pod \"dnsmasq-dns-b8fbc5445-m7sk6\" (UID: \"37642f49-f656-4aeb-a359-2c32f4cf7919\") " pod="openstack/dnsmasq-dns-b8fbc5445-m7sk6"
Nov 22 10:55:53 crc kubenswrapper[4926]: I1122 10:55:53.727920 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/37642f49-f656-4aeb-a359-2c32f4cf7919-dns-svc\") pod \"dnsmasq-dns-b8fbc5445-m7sk6\" (UID: \"37642f49-f656-4aeb-a359-2c32f4cf7919\") " pod="openstack/dnsmasq-dns-b8fbc5445-m7sk6"
Nov 22 10:55:53 crc kubenswrapper[4926]: I1122 10:55:53.727952 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/37642f49-f656-4aeb-a359-2c32f4cf7919-ovsdbserver-sb\") pod \"dnsmasq-dns-b8fbc5445-m7sk6\" (UID: \"37642f49-f656-4aeb-a359-2c32f4cf7919\") " pod="openstack/dnsmasq-dns-b8fbc5445-m7sk6"
Nov 22 10:55:53 crc kubenswrapper[4926]: I1122 10:55:53.728170 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7nx6w\" (UniqueName: \"kubernetes.io/projected/d90df493-f9a0-4774-bd2e-6b96bbfebf31-kube-api-access-7nx6w\") pod \"ovn-northd-0\" (UID: \"d90df493-f9a0-4774-bd2e-6b96bbfebf31\") " pod="openstack/ovn-northd-0"
Nov 22 10:55:53 crc kubenswrapper[4926]: I1122 10:55:53.728342 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d90df493-f9a0-4774-bd2e-6b96bbfebf31-config\") pod \"ovn-northd-0\" (UID: \"d90df493-f9a0-4774-bd2e-6b96bbfebf31\") " pod="openstack/ovn-northd-0"
Nov 22 10:55:53 crc kubenswrapper[4926]: I1122 10:55:53.728445 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cstqh\" (UniqueName: \"kubernetes.io/projected/37642f49-f656-4aeb-a359-2c32f4cf7919-kube-api-access-cstqh\") pod \"dnsmasq-dns-b8fbc5445-m7sk6\" (UID: \"37642f49-f656-4aeb-a359-2c32f4cf7919\") " pod="openstack/dnsmasq-dns-b8fbc5445-m7sk6"
Nov 22 10:55:53 crc kubenswrapper[4926]: I1122 10:55:53.728473 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/d90df493-f9a0-4774-bd2e-6b96bbfebf31-metrics-certs-tls-certs\") pod \"ovn-northd-0\" (UID: \"d90df493-f9a0-4774-bd2e-6b96bbfebf31\") " pod="openstack/ovn-northd-0"
Nov 22 10:55:53 crc kubenswrapper[4926]: I1122 10:55:53.728498 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d90df493-f9a0-4774-bd2e-6b96bbfebf31-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: \"d90df493-f9a0-4774-bd2e-6b96bbfebf31\") " pod="openstack/ovn-northd-0"
Nov 22 10:55:53 crc kubenswrapper[4926]: I1122 10:55:53.728532 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/d90df493-f9a0-4774-bd2e-6b96bbfebf31-scripts\") pod \"ovn-northd-0\" (UID: \"d90df493-f9a0-4774-bd2e-6b96bbfebf31\") " pod="openstack/ovn-northd-0"
Nov 22 10:55:53 crc kubenswrapper[4926]: I1122 10:55:53.728560 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/d90df493-f9a0-4774-bd2e-6b96bbfebf31-ovn-rundir\") pod \"ovn-northd-0\" (UID: \"d90df493-f9a0-4774-bd2e-6b96bbfebf31\") " pod="openstack/ovn-northd-0"
Nov 22 10:55:53 crc kubenswrapper[4926]: I1122 10:55:53.728590 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/37642f49-f656-4aeb-a359-2c32f4cf7919-ovsdbserver-nb\") pod \"dnsmasq-dns-b8fbc5445-m7sk6\" (UID: \"37642f49-f656-4aeb-a359-2c32f4cf7919\") " pod="openstack/dnsmasq-dns-b8fbc5445-m7sk6"
Nov 22 10:55:53 crc kubenswrapper[4926]: I1122 10:55:53.728715 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/d90df493-f9a0-4774-bd2e-6b96bbfebf31-ovn-northd-tls-certs\") pod \"ovn-northd-0\" (UID: \"d90df493-f9a0-4774-bd2e-6b96bbfebf31\") " pod="openstack/ovn-northd-0"
Nov 22 10:55:53 crc kubenswrapper[4926]: I1122 10:55:53.731743 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/d90df493-f9a0-4774-bd2e-6b96bbfebf31-ovn-rundir\") pod \"ovn-northd-0\" (UID: \"d90df493-f9a0-4774-bd2e-6b96bbfebf31\") " pod="openstack/ovn-northd-0"
Nov 22 10:55:53 crc kubenswrapper[4926]: I1122 10:55:53.732028 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/d90df493-f9a0-4774-bd2e-6b96bbfebf31-scripts\") pod \"ovn-northd-0\" (UID: \"d90df493-f9a0-4774-bd2e-6b96bbfebf31\") " pod="openstack/ovn-northd-0"
Nov 22 10:55:53 crc kubenswrapper[4926]: I1122 10:55:53.734315 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/d90df493-f9a0-4774-bd2e-6b96bbfebf31-metrics-certs-tls-certs\") pod \"ovn-northd-0\" (UID: \"d90df493-f9a0-4774-bd2e-6b96bbfebf31\") " pod="openstack/ovn-northd-0"
Nov 22 10:55:53 crc kubenswrapper[4926]: I1122 10:55:53.734976 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/d90df493-f9a0-4774-bd2e-6b96bbfebf31-ovn-northd-tls-certs\") pod \"ovn-northd-0\" (UID: \"d90df493-f9a0-4774-bd2e-6b96bbfebf31\") " pod="openstack/ovn-northd-0"
Nov 22 10:55:53 crc kubenswrapper[4926]: I1122 10:55:53.735134 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d90df493-f9a0-4774-bd2e-6b96bbfebf31-config\") pod \"ovn-northd-0\" (UID: \"d90df493-f9a0-4774-bd2e-6b96bbfebf31\") " pod="openstack/ovn-northd-0"
Nov 22 10:55:53 crc kubenswrapper[4926]: I1122 10:55:53.748903 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7nx6w\" (UniqueName: \"kubernetes.io/projected/d90df493-f9a0-4774-bd2e-6b96bbfebf31-kube-api-access-7nx6w\") pod \"ovn-northd-0\" (UID: \"d90df493-f9a0-4774-bd2e-6b96bbfebf31\") " pod="openstack/ovn-northd-0"
Nov 22 10:55:53 crc kubenswrapper[4926]: I1122 10:55:53.750550 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d90df493-f9a0-4774-bd2e-6b96bbfebf31-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: \"d90df493-f9a0-4774-bd2e-6b96bbfebf31\") " pod="openstack/ovn-northd-0"
Nov 22 10:55:53 crc kubenswrapper[4926]: I1122 10:55:53.830024 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/37642f49-f656-4aeb-a359-2c32f4cf7919-dns-svc\") pod \"dnsmasq-dns-b8fbc5445-m7sk6\" (UID: \"37642f49-f656-4aeb-a359-2c32f4cf7919\") " pod="openstack/dnsmasq-dns-b8fbc5445-m7sk6"
Nov 22 10:55:53 crc kubenswrapper[4926]: I1122 10:55:53.830198 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/37642f49-f656-4aeb-a359-2c32f4cf7919-config\") pod \"dnsmasq-dns-b8fbc5445-m7sk6\" (UID: \"37642f49-f656-4aeb-a359-2c32f4cf7919\") " pod="openstack/dnsmasq-dns-b8fbc5445-m7sk6"
Nov 22 10:55:53 crc kubenswrapper[4926]: I1122 10:55:53.830215 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/37642f49-f656-4aeb-a359-2c32f4cf7919-ovsdbserver-sb\") pod \"dnsmasq-dns-b8fbc5445-m7sk6\" (UID: \"37642f49-f656-4aeb-a359-2c32f4cf7919\") " pod="openstack/dnsmasq-dns-b8fbc5445-m7sk6"
Nov 22 10:55:53 crc kubenswrapper[4926]: I1122 10:55:53.830330 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cstqh\" (UniqueName: \"kubernetes.io/projected/37642f49-f656-4aeb-a359-2c32f4cf7919-kube-api-access-cstqh\") pod \"dnsmasq-dns-b8fbc5445-m7sk6\" (UID: \"37642f49-f656-4aeb-a359-2c32f4cf7919\") " pod="openstack/dnsmasq-dns-b8fbc5445-m7sk6"
Nov 22 10:55:53 crc kubenswrapper[4926]: I1122 10:55:53.830359 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/37642f49-f656-4aeb-a359-2c32f4cf7919-ovsdbserver-nb\") pod \"dnsmasq-dns-b8fbc5445-m7sk6\" (UID: \"37642f49-f656-4aeb-a359-2c32f4cf7919\") " pod="openstack/dnsmasq-dns-b8fbc5445-m7sk6"
Nov 22 10:55:53 crc kubenswrapper[4926]: I1122 10:55:53.832789 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/37642f49-f656-4aeb-a359-2c32f4cf7919-ovsdbserver-nb\") pod \"dnsmasq-dns-b8fbc5445-m7sk6\" (UID: \"37642f49-f656-4aeb-a359-2c32f4cf7919\") " pod="openstack/dnsmasq-dns-b8fbc5445-m7sk6"
Nov 22 10:55:53 crc kubenswrapper[4926]: I1122 10:55:53.833753 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/37642f49-f656-4aeb-a359-2c32f4cf7919-dns-svc\") pod \"dnsmasq-dns-b8fbc5445-m7sk6\" (UID: \"37642f49-f656-4aeb-a359-2c32f4cf7919\") " pod="openstack/dnsmasq-dns-b8fbc5445-m7sk6"
Nov 22 10:55:53 crc kubenswrapper[4926]: I1122 10:55:53.834398 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/37642f49-f656-4aeb-a359-2c32f4cf7919-config\") pod \"dnsmasq-dns-b8fbc5445-m7sk6\" (UID: \"37642f49-f656-4aeb-a359-2c32f4cf7919\") " pod="openstack/dnsmasq-dns-b8fbc5445-m7sk6"
Nov 22 10:55:53 crc kubenswrapper[4926]: I1122 10:55:53.852270 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cstqh\" (UniqueName: \"kubernetes.io/projected/37642f49-f656-4aeb-a359-2c32f4cf7919-kube-api-access-cstqh\") pod \"dnsmasq-dns-b8fbc5445-m7sk6\" (UID: \"37642f49-f656-4aeb-a359-2c32f4cf7919\") " pod="openstack/dnsmasq-dns-b8fbc5445-m7sk6"
Nov 22 10:55:53 crc kubenswrapper[4926]: I1122 10:55:53.871324 4926 generic.go:334] "Generic (PLEG): container finished" podID="99e8179f-b7fe-402c-bbb6-fcb8b0ab1abb" containerID="65b6e30d2e74694d237d419ae574205499026b6ae9140631d58ae2b809f03378" exitCode=0
Nov 22 10:55:53 crc kubenswrapper[4926]: I1122 10:55:53.872009 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5ccc8479f9-s7wk2" event={"ID":"99e8179f-b7fe-402c-bbb6-fcb8b0ab1abb","Type":"ContainerDied","Data":"65b6e30d2e74694d237d419ae574205499026b6ae9140631d58ae2b809f03378"}
Nov 22 10:55:53 crc kubenswrapper[4926]: I1122 10:55:53.872967 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-7cb5889db5-cjghc"
Nov 22 10:55:53 crc kubenswrapper[4926]: I1122 10:55:53.958976 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-northd-0"
Nov 22 10:55:54 crc kubenswrapper[4926]: I1122 10:55:54.170780 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5ccc8479f9-s7wk2"
Nov 22 10:55:54 crc kubenswrapper[4926]: I1122 10:55:54.175935 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-metrics-fvp8n"]
Nov 22 10:55:54 crc kubenswrapper[4926]: W1122 10:55:54.180100 4926 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd2154a83_1eaa_44bc_ade8_754245e919b2.slice/crio-80bcec87c867e996c7a6b2d92bd8f338ce609e41620147c2f665ace91d607139 WatchSource:0}: Error finding container 80bcec87c867e996c7a6b2d92bd8f338ce609e41620147c2f665ace91d607139: Status 404 returned error can't find the container with id 80bcec87c867e996c7a6b2d92bd8f338ce609e41620147c2f665ace91d607139
Nov 22 10:55:54 crc kubenswrapper[4926]: I1122 10:55:54.244768 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-956x5\" (UniqueName: \"kubernetes.io/projected/99e8179f-b7fe-402c-bbb6-fcb8b0ab1abb-kube-api-access-956x5\") pod \"99e8179f-b7fe-402c-bbb6-fcb8b0ab1abb\" (UID: \"99e8179f-b7fe-402c-bbb6-fcb8b0ab1abb\") "
Nov 22 10:55:54 crc kubenswrapper[4926]: I1122 10:55:54.244863 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/99e8179f-b7fe-402c-bbb6-fcb8b0ab1abb-config\") pod \"99e8179f-b7fe-402c-bbb6-fcb8b0ab1abb\" (UID: \"99e8179f-b7fe-402c-bbb6-fcb8b0ab1abb\") "
Nov 22 10:55:54 crc kubenswrapper[4926]: I1122 10:55:54.244913 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/99e8179f-b7fe-402c-bbb6-fcb8b0ab1abb-dns-svc\") pod \"99e8179f-b7fe-402c-bbb6-fcb8b0ab1abb\" (UID: \"99e8179f-b7fe-402c-bbb6-fcb8b0ab1abb\") "
Nov 22 10:55:54 crc kubenswrapper[4926]: I1122 10:55:54.249830 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/99e8179f-b7fe-402c-bbb6-fcb8b0ab1abb-kube-api-access-956x5" (OuterVolumeSpecName: "kube-api-access-956x5") pod "99e8179f-b7fe-402c-bbb6-fcb8b0ab1abb" (UID: "99e8179f-b7fe-402c-bbb6-fcb8b0ab1abb"). InnerVolumeSpecName "kube-api-access-956x5". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 22 10:55:54 crc kubenswrapper[4926]: I1122 10:55:54.295679 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/99e8179f-b7fe-402c-bbb6-fcb8b0ab1abb-config" (OuterVolumeSpecName: "config") pod "99e8179f-b7fe-402c-bbb6-fcb8b0ab1abb" (UID: "99e8179f-b7fe-402c-bbb6-fcb8b0ab1abb"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
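[Editor's note] The teardown of the old dnsmasq pod above follows the standard order: the container is killed with a grace period ("Killing container with a grace period ... gracePeriod=10"), then each volume is unmounted (UnmountVolume.TearDown), and finally the volumes are reported detached. A hedged sketch of where a gracePeriod like 10 can originate is below: a delete request may carry it explicitly, otherwise the pod's terminationGracePeriodSeconds applies. Standard client-go calls; the kubeconfig loading is again an assumption for the sketch.

```go
package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	cfg, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		panic(err)
	}
	cs, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}
	// Explicit grace period on the delete request; matches the
	// gracePeriod=10 the kubelet logs when killing the container.
	gp := int64(10)
	err = cs.CoreV1().Pods("openstack").Delete(context.TODO(),
		"dnsmasq-dns-5ccc8479f9-s7wk2",
		metav1.DeleteOptions{GracePeriodSeconds: &gp})
	fmt.Println("delete result:", err)
}
```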
Nov 22 10:55:54 crc kubenswrapper[4926]: I1122 10:55:54.301871 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/99e8179f-b7fe-402c-bbb6-fcb8b0ab1abb-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "99e8179f-b7fe-402c-bbb6-fcb8b0ab1abb" (UID: "99e8179f-b7fe-402c-bbb6-fcb8b0ab1abb"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 22 10:55:54 crc kubenswrapper[4926]: I1122 10:55:54.347329 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-956x5\" (UniqueName: \"kubernetes.io/projected/99e8179f-b7fe-402c-bbb6-fcb8b0ab1abb-kube-api-access-956x5\") on node \"crc\" DevicePath \"\""
Nov 22 10:55:54 crc kubenswrapper[4926]: I1122 10:55:54.347664 4926 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/99e8179f-b7fe-402c-bbb6-fcb8b0ab1abb-config\") on node \"crc\" DevicePath \"\""
Nov 22 10:55:54 crc kubenswrapper[4926]: I1122 10:55:54.347675 4926 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/99e8179f-b7fe-402c-bbb6-fcb8b0ab1abb-dns-svc\") on node \"crc\" DevicePath \"\""
Nov 22 10:55:54 crc kubenswrapper[4926]: E1122 10:55:54.424638 4926 configmap.go:193] Couldn't get configMap openstack/ovsdbserver-sb: failed to sync configmap cache: timed out waiting for the condition
Nov 22 10:55:54 crc kubenswrapper[4926]: E1122 10:55:54.424719 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/935dca86-6400-44ad-9976-770f56ea6a6b-ovsdbserver-sb podName:935dca86-6400-44ad-9976-770f56ea6a6b nodeName:}" failed. No retries permitted until 2025-11-22 10:55:54.924700809 +0000 UTC m=+975.226306096 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "ovsdbserver-sb" (UniqueName: "kubernetes.io/configmap/935dca86-6400-44ad-9976-770f56ea6a6b-ovsdbserver-sb") pod "dnsmasq-dns-8cc7fc4dc-82m4x" (UID: "935dca86-6400-44ad-9976-770f56ea6a6b") : failed to sync configmap cache: timed out waiting for the condition
Nov 22 10:55:54 crc kubenswrapper[4926]: I1122 10:55:54.434809 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-northd-0"]
Nov 22 10:55:54 crc kubenswrapper[4926]: W1122 10:55:54.445579 4926 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd90df493_f9a0_4774_bd2e_6b96bbfebf31.slice/crio-0a754158b7b1a8e0acb42f63c17e1ba0083affc151387778f1802ba933868bea WatchSource:0}: Error finding container 0a754158b7b1a8e0acb42f63c17e1ba0083affc151387778f1802ba933868bea: Status 404 returned error can't find the container with id 0a754158b7b1a8e0acb42f63c17e1ba0083affc151387778f1802ba933868bea
Nov 22 10:55:54 crc kubenswrapper[4926]: I1122 10:55:54.480946 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovsdbserver-sb"
Nov 22 10:55:54 crc kubenswrapper[4926]: I1122 10:55:54.486236 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/37642f49-f656-4aeb-a359-2c32f4cf7919-ovsdbserver-sb\") pod \"dnsmasq-dns-b8fbc5445-m7sk6\" (UID: \"37642f49-f656-4aeb-a359-2c32f4cf7919\") " pod="openstack/dnsmasq-dns-b8fbc5445-m7sk6"
Nov 22 10:55:54 crc kubenswrapper[4926]: I1122 10:55:54.571654 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-b8fbc5445-m7sk6"
Nov 22 10:55:54 crc kubenswrapper[4926]: I1122 10:55:54.880370 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"d90df493-f9a0-4774-bd2e-6b96bbfebf31","Type":"ContainerStarted","Data":"0a754158b7b1a8e0acb42f63c17e1ba0083affc151387778f1802ba933868bea"}
Nov 22 10:55:54 crc kubenswrapper[4926]: I1122 10:55:54.881771 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-metrics-fvp8n" event={"ID":"d2154a83-1eaa-44bc-ade8-754245e919b2","Type":"ContainerStarted","Data":"c1d172ff1387e1360dc9176cd135bdbc4f919c237b1a74f80b87ead3a7ed841d"}
Nov 22 10:55:54 crc kubenswrapper[4926]: I1122 10:55:54.881811 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-metrics-fvp8n" event={"ID":"d2154a83-1eaa-44bc-ade8-754245e919b2","Type":"ContainerStarted","Data":"80bcec87c867e996c7a6b2d92bd8f338ce609e41620147c2f665ace91d607139"}
Nov 22 10:55:54 crc kubenswrapper[4926]: I1122 10:55:54.884435 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5ccc8479f9-s7wk2" event={"ID":"99e8179f-b7fe-402c-bbb6-fcb8b0ab1abb","Type":"ContainerDied","Data":"accda89db61367c9b42a9ba935ee3143053cfcb08e38846d3800e9fb1bded8fc"}
Nov 22 10:55:54 crc kubenswrapper[4926]: I1122 10:55:54.884502 4926 scope.go:117] "RemoveContainer" containerID="65b6e30d2e74694d237d419ae574205499026b6ae9140631d58ae2b809f03378"
Nov 22 10:55:54 crc kubenswrapper[4926]: I1122 10:55:54.884831 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-7cb5889db5-cjghc" podUID="1c468f29-520a-4129-9ad6-8661f644a1a9" containerName="dnsmasq-dns" containerID="cri-o://0d2ce5c3188f6df595356a6d891ddf9aefd529a612d60d295c2f92a33aac7702" gracePeriod=10
Nov 22 10:55:54 crc kubenswrapper[4926]: I1122 10:55:54.885704 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5ccc8479f9-s7wk2"
Nov 22 10:55:54 crc kubenswrapper[4926]: I1122 10:55:54.902465 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-metrics-fvp8n" podStartSLOduration=1.902445289 podStartE2EDuration="1.902445289s" podCreationTimestamp="2025-11-22 10:55:53 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 10:55:54.898200077 +0000 UTC m=+975.199805444" watchObservedRunningTime="2025-11-22 10:55:54.902445289 +0000 UTC m=+975.204050576"
Nov 22 10:55:54 crc kubenswrapper[4926]: I1122 10:55:54.920982 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5ccc8479f9-s7wk2"]
Nov 22 10:55:54 crc kubenswrapper[4926]: I1122 10:55:54.928720 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-5ccc8479f9-s7wk2"]
Nov 22 10:55:54 crc kubenswrapper[4926]: I1122 10:55:54.960362 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/935dca86-6400-44ad-9976-770f56ea6a6b-ovsdbserver-sb\") pod \"dnsmasq-dns-8cc7fc4dc-82m4x\" (UID: \"935dca86-6400-44ad-9976-770f56ea6a6b\") " pod="openstack/dnsmasq-dns-8cc7fc4dc-82m4x"
Nov 22 10:55:54 crc kubenswrapper[4926]: I1122 10:55:54.963738 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/935dca86-6400-44ad-9976-770f56ea6a6b-ovsdbserver-sb\") pod \"dnsmasq-dns-8cc7fc4dc-82m4x\" (UID: \"935dca86-6400-44ad-9976-770f56ea6a6b\") " pod="openstack/dnsmasq-dns-8cc7fc4dc-82m4x"
Nov 22 10:55:55 crc kubenswrapper[4926]: I1122 10:55:55.071367 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-8cc7fc4dc-82m4x"
Nov 22 10:55:55 crc kubenswrapper[4926]: I1122 10:55:55.164733 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/2d0b3ca4-531c-4c3e-9af7-f5d2b65c7251-etc-swift\") pod \"swift-storage-0\" (UID: \"2d0b3ca4-531c-4c3e-9af7-f5d2b65c7251\") " pod="openstack/swift-storage-0"
Nov 22 10:55:55 crc kubenswrapper[4926]: E1122 10:55:55.164914 4926 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found
Nov 22 10:55:55 crc kubenswrapper[4926]: E1122 10:55:55.164931 4926 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found
Nov 22 10:55:55 crc kubenswrapper[4926]: E1122 10:55:55.164985 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/2d0b3ca4-531c-4c3e-9af7-f5d2b65c7251-etc-swift podName:2d0b3ca4-531c-4c3e-9af7-f5d2b65c7251 nodeName:}" failed. No retries permitted until 2025-11-22 10:55:59.164968819 +0000 UTC m=+979.466574106 (durationBeforeRetry 4s).
Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/2d0b3ca4-531c-4c3e-9af7-f5d2b65c7251-etc-swift") pod "swift-storage-0" (UID: "2d0b3ca4-531c-4c3e-9af7-f5d2b65c7251") : configmap "swift-ring-files" not found Nov 22 10:55:55 crc kubenswrapper[4926]: I1122 10:55:55.903254 4926 generic.go:334] "Generic (PLEG): container finished" podID="1c468f29-520a-4129-9ad6-8661f644a1a9" containerID="0d2ce5c3188f6df595356a6d891ddf9aefd529a612d60d295c2f92a33aac7702" exitCode=0 Nov 22 10:55:55 crc kubenswrapper[4926]: I1122 10:55:55.903348 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7cb5889db5-cjghc" event={"ID":"1c468f29-520a-4129-9ad6-8661f644a1a9","Type":"ContainerDied","Data":"0d2ce5c3188f6df595356a6d891ddf9aefd529a612d60d295c2f92a33aac7702"} Nov 22 10:55:56 crc kubenswrapper[4926]: I1122 10:55:56.380957 4926 scope.go:117] "RemoveContainer" containerID="e56180d4c5344ea74cfaf216d4437e66efc7343313959a311e8afba542f5bc84" Nov 22 10:55:56 crc kubenswrapper[4926]: I1122 10:55:56.422825 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/openstack-galera-0" Nov 22 10:55:56 crc kubenswrapper[4926]: I1122 10:55:56.423910 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/openstack-galera-0" Nov 22 10:55:56 crc kubenswrapper[4926]: I1122 10:55:56.510750 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/openstack-galera-0" Nov 22 10:55:56 crc kubenswrapper[4926]: I1122 10:55:56.607872 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="99e8179f-b7fe-402c-bbb6-fcb8b0ab1abb" path="/var/lib/kubelet/pods/99e8179f-b7fe-402c-bbb6-fcb8b0ab1abb/volumes" Nov 22 10:55:56 crc kubenswrapper[4926]: I1122 10:55:56.657816 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7cb5889db5-cjghc" Nov 22 10:55:56 crc kubenswrapper[4926]: I1122 10:55:56.690091 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d6w7z\" (UniqueName: \"kubernetes.io/projected/1c468f29-520a-4129-9ad6-8661f644a1a9-kube-api-access-d6w7z\") pod \"1c468f29-520a-4129-9ad6-8661f644a1a9\" (UID: \"1c468f29-520a-4129-9ad6-8661f644a1a9\") " Nov 22 10:55:56 crc kubenswrapper[4926]: I1122 10:55:56.690325 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1c468f29-520a-4129-9ad6-8661f644a1a9-config\") pod \"1c468f29-520a-4129-9ad6-8661f644a1a9\" (UID: \"1c468f29-520a-4129-9ad6-8661f644a1a9\") " Nov 22 10:55:56 crc kubenswrapper[4926]: I1122 10:55:56.690376 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/1c468f29-520a-4129-9ad6-8661f644a1a9-dns-svc\") pod \"1c468f29-520a-4129-9ad6-8661f644a1a9\" (UID: \"1c468f29-520a-4129-9ad6-8661f644a1a9\") " Nov 22 10:55:56 crc kubenswrapper[4926]: I1122 10:55:56.696625 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1c468f29-520a-4129-9ad6-8661f644a1a9-kube-api-access-d6w7z" (OuterVolumeSpecName: "kube-api-access-d6w7z") pod "1c468f29-520a-4129-9ad6-8661f644a1a9" (UID: "1c468f29-520a-4129-9ad6-8661f644a1a9"). InnerVolumeSpecName "kube-api-access-d6w7z". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:55:56 crc kubenswrapper[4926]: I1122 10:55:56.742666 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1c468f29-520a-4129-9ad6-8661f644a1a9-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "1c468f29-520a-4129-9ad6-8661f644a1a9" (UID: "1c468f29-520a-4129-9ad6-8661f644a1a9"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:55:56 crc kubenswrapper[4926]: I1122 10:55:56.743377 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1c468f29-520a-4129-9ad6-8661f644a1a9-config" (OuterVolumeSpecName: "config") pod "1c468f29-520a-4129-9ad6-8661f644a1a9" (UID: "1c468f29-520a-4129-9ad6-8661f644a1a9"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:55:56 crc kubenswrapper[4926]: I1122 10:55:56.794265 4926 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1c468f29-520a-4129-9ad6-8661f644a1a9-config\") on node \"crc\" DevicePath \"\"" Nov 22 10:55:56 crc kubenswrapper[4926]: I1122 10:55:56.794300 4926 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/1c468f29-520a-4129-9ad6-8661f644a1a9-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 22 10:55:56 crc kubenswrapper[4926]: I1122 10:55:56.794313 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d6w7z\" (UniqueName: \"kubernetes.io/projected/1c468f29-520a-4129-9ad6-8661f644a1a9-kube-api-access-d6w7z\") on node \"crc\" DevicePath \"\"" Nov 22 10:55:56 crc kubenswrapper[4926]: I1122 10:55:56.916930 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-8cc7fc4dc-82m4x"] Nov 22 10:55:56 crc kubenswrapper[4926]: I1122 10:55:56.919842 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-bn9s6" event={"ID":"06d59088-e96c-45eb-aba8-00382ceaa48a","Type":"ContainerStarted","Data":"3c756e96c803958d3a4129076f83d73a4b74df88a96a3e20d4ae13c7bf2a0830"} Nov 22 10:55:56 crc kubenswrapper[4926]: I1122 10:55:56.924227 4926 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-7cb5889db5-cjghc" Nov 22 10:55:56 crc kubenswrapper[4926]: I1122 10:55:56.926102 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7cb5889db5-cjghc" event={"ID":"1c468f29-520a-4129-9ad6-8661f644a1a9","Type":"ContainerDied","Data":"45e065964487ac9dfacea74718161f45b2f764036d90d7694206560417938038"} Nov 22 10:55:56 crc kubenswrapper[4926]: I1122 10:55:56.926160 4926 scope.go:117] "RemoveContainer" containerID="0d2ce5c3188f6df595356a6d891ddf9aefd529a612d60d295c2f92a33aac7702" Nov 22 10:55:56 crc kubenswrapper[4926]: I1122 10:55:56.957941 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/swift-ring-rebalance-bn9s6" podStartSLOduration=2.184946815 podStartE2EDuration="5.957925974s" podCreationTimestamp="2025-11-22 10:55:51 +0000 UTC" firstStartedPulling="2025-11-22 10:55:52.72129801 +0000 UTC m=+973.022903297" lastFinishedPulling="2025-11-22 10:55:56.494277169 +0000 UTC m=+976.795882456" observedRunningTime="2025-11-22 10:55:56.94213669 +0000 UTC m=+977.243741977" watchObservedRunningTime="2025-11-22 10:55:56.957925974 +0000 UTC m=+977.259531261" Nov 22 10:55:56 crc kubenswrapper[4926]: I1122 10:55:56.963929 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-7cb5889db5-cjghc"] Nov 22 10:55:56 crc kubenswrapper[4926]: I1122 10:55:56.970647 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-7cb5889db5-cjghc"] Nov 22 10:55:57 crc kubenswrapper[4926]: I1122 10:55:57.003610 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/openstack-galera-0" Nov 22 10:55:57 crc kubenswrapper[4926]: I1122 10:55:57.086146 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-b8fbc5445-m7sk6"] Nov 22 10:55:57 crc kubenswrapper[4926]: W1122 10:55:57.223636 4926 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod935dca86_6400_44ad_9976_770f56ea6a6b.slice/crio-e2976ad5a0e5de8b74fff79f499fa72213f8c317975f8e9a214a91b659e68791 WatchSource:0}: Error finding container e2976ad5a0e5de8b74fff79f499fa72213f8c317975f8e9a214a91b659e68791: Status 404 returned error can't find the container with id e2976ad5a0e5de8b74fff79f499fa72213f8c317975f8e9a214a91b659e68791 Nov 22 10:55:57 crc kubenswrapper[4926]: W1122 10:55:57.230935 4926 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod37642f49_f656_4aeb_a359_2c32f4cf7919.slice/crio-f75afefa92660dc1ec94f94408356fad207d07e851c38808c7af989d1042f505 WatchSource:0}: Error finding container f75afefa92660dc1ec94f94408356fad207d07e851c38808c7af989d1042f505: Status 404 returned error can't find the container with id f75afefa92660dc1ec94f94408356fad207d07e851c38808c7af989d1042f505 Nov 22 10:55:57 crc kubenswrapper[4926]: I1122 10:55:57.239851 4926 scope.go:117] "RemoveContainer" containerID="6df874767b0a39226d44aa1d0a18ab461578f58320d3ad6d28b573a67bac3cf2" Nov 22 10:55:57 crc kubenswrapper[4926]: I1122 10:55:57.767976 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/openstack-cell1-galera-0" Nov 22 10:55:57 crc kubenswrapper[4926]: I1122 10:55:57.768347 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/openstack-cell1-galera-0" Nov 22 10:55:57 crc kubenswrapper[4926]: I1122 10:55:57.908288 4926 kubelet.go:2421] "SyncLoop 
ADD" source="api" pods=["openstack/keystone-db-create-6mrs7"] Nov 22 10:55:57 crc kubenswrapper[4926]: E1122 10:55:57.908711 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1c468f29-520a-4129-9ad6-8661f644a1a9" containerName="dnsmasq-dns" Nov 22 10:55:57 crc kubenswrapper[4926]: I1122 10:55:57.908731 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="1c468f29-520a-4129-9ad6-8661f644a1a9" containerName="dnsmasq-dns" Nov 22 10:55:57 crc kubenswrapper[4926]: E1122 10:55:57.908744 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="99e8179f-b7fe-402c-bbb6-fcb8b0ab1abb" containerName="dnsmasq-dns" Nov 22 10:55:57 crc kubenswrapper[4926]: I1122 10:55:57.908752 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="99e8179f-b7fe-402c-bbb6-fcb8b0ab1abb" containerName="dnsmasq-dns" Nov 22 10:55:57 crc kubenswrapper[4926]: E1122 10:55:57.908763 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="99e8179f-b7fe-402c-bbb6-fcb8b0ab1abb" containerName="init" Nov 22 10:55:57 crc kubenswrapper[4926]: I1122 10:55:57.908772 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="99e8179f-b7fe-402c-bbb6-fcb8b0ab1abb" containerName="init" Nov 22 10:55:57 crc kubenswrapper[4926]: E1122 10:55:57.908809 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1c468f29-520a-4129-9ad6-8661f644a1a9" containerName="init" Nov 22 10:55:57 crc kubenswrapper[4926]: I1122 10:55:57.908820 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="1c468f29-520a-4129-9ad6-8661f644a1a9" containerName="init" Nov 22 10:55:57 crc kubenswrapper[4926]: I1122 10:55:57.909034 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="99e8179f-b7fe-402c-bbb6-fcb8b0ab1abb" containerName="dnsmasq-dns" Nov 22 10:55:57 crc kubenswrapper[4926]: I1122 10:55:57.909051 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="1c468f29-520a-4129-9ad6-8661f644a1a9" containerName="dnsmasq-dns" Nov 22 10:55:57 crc kubenswrapper[4926]: I1122 10:55:57.909679 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-create-6mrs7" Nov 22 10:55:57 crc kubenswrapper[4926]: I1122 10:55:57.915160 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-create-6mrs7"] Nov 22 10:55:57 crc kubenswrapper[4926]: I1122 10:55:57.938947 4926 generic.go:334] "Generic (PLEG): container finished" podID="37642f49-f656-4aeb-a359-2c32f4cf7919" containerID="5e857642e9a0242c896faab7453d83e3c726aee2afa9204dea594776ca0877b4" exitCode=0 Nov 22 10:55:57 crc kubenswrapper[4926]: I1122 10:55:57.939010 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-b8fbc5445-m7sk6" event={"ID":"37642f49-f656-4aeb-a359-2c32f4cf7919","Type":"ContainerDied","Data":"5e857642e9a0242c896faab7453d83e3c726aee2afa9204dea594776ca0877b4"} Nov 22 10:55:57 crc kubenswrapper[4926]: I1122 10:55:57.939035 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-b8fbc5445-m7sk6" event={"ID":"37642f49-f656-4aeb-a359-2c32f4cf7919","Type":"ContainerStarted","Data":"f75afefa92660dc1ec94f94408356fad207d07e851c38808c7af989d1042f505"} Nov 22 10:55:57 crc kubenswrapper[4926]: I1122 10:55:57.941419 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-9385-account-create-update-w2grf"] Nov 22 10:55:57 crc kubenswrapper[4926]: I1122 10:55:57.942464 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-9385-account-create-update-w2grf" Nov 22 10:55:57 crc kubenswrapper[4926]: I1122 10:55:57.942741 4926 generic.go:334] "Generic (PLEG): container finished" podID="935dca86-6400-44ad-9976-770f56ea6a6b" containerID="6977ee2f27390631c7627763ddef83b7cda045cc4026277cea14a2be84e4295d" exitCode=0 Nov 22 10:55:57 crc kubenswrapper[4926]: I1122 10:55:57.943856 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-8cc7fc4dc-82m4x" event={"ID":"935dca86-6400-44ad-9976-770f56ea6a6b","Type":"ContainerDied","Data":"6977ee2f27390631c7627763ddef83b7cda045cc4026277cea14a2be84e4295d"} Nov 22 10:55:57 crc kubenswrapper[4926]: I1122 10:55:57.943897 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-8cc7fc4dc-82m4x" event={"ID":"935dca86-6400-44ad-9976-770f56ea6a6b","Type":"ContainerStarted","Data":"e2976ad5a0e5de8b74fff79f499fa72213f8c317975f8e9a214a91b659e68791"} Nov 22 10:55:57 crc kubenswrapper[4926]: I1122 10:55:57.945628 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-db-secret" Nov 22 10:55:57 crc kubenswrapper[4926]: I1122 10:55:57.986429 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-9385-account-create-update-w2grf"] Nov 22 10:55:58 crc kubenswrapper[4926]: I1122 10:55:58.016008 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7nbtq\" (UniqueName: \"kubernetes.io/projected/8db0114b-7b33-4e75-88ba-cb05b049fa7d-kube-api-access-7nbtq\") pod \"keystone-9385-account-create-update-w2grf\" (UID: \"8db0114b-7b33-4e75-88ba-cb05b049fa7d\") " pod="openstack/keystone-9385-account-create-update-w2grf" Nov 22 10:55:58 crc kubenswrapper[4926]: I1122 10:55:58.016236 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rhnlc\" (UniqueName: \"kubernetes.io/projected/345b6795-6ff9-4d04-9128-4123b30d27da-kube-api-access-rhnlc\") pod \"keystone-db-create-6mrs7\" (UID: \"345b6795-6ff9-4d04-9128-4123b30d27da\") " pod="openstack/keystone-db-create-6mrs7" Nov 22 10:55:58 crc kubenswrapper[4926]: I1122 10:55:58.016304 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/345b6795-6ff9-4d04-9128-4123b30d27da-operator-scripts\") pod \"keystone-db-create-6mrs7\" (UID: \"345b6795-6ff9-4d04-9128-4123b30d27da\") " pod="openstack/keystone-db-create-6mrs7" Nov 22 10:55:58 crc kubenswrapper[4926]: I1122 10:55:58.016392 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8db0114b-7b33-4e75-88ba-cb05b049fa7d-operator-scripts\") pod \"keystone-9385-account-create-update-w2grf\" (UID: \"8db0114b-7b33-4e75-88ba-cb05b049fa7d\") " pod="openstack/keystone-9385-account-create-update-w2grf" Nov 22 10:55:58 crc kubenswrapper[4926]: I1122 10:55:58.117765 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7nbtq\" (UniqueName: \"kubernetes.io/projected/8db0114b-7b33-4e75-88ba-cb05b049fa7d-kube-api-access-7nbtq\") pod \"keystone-9385-account-create-update-w2grf\" (UID: \"8db0114b-7b33-4e75-88ba-cb05b049fa7d\") " pod="openstack/keystone-9385-account-create-update-w2grf" Nov 22 10:55:58 crc kubenswrapper[4926]: I1122 10:55:58.117910 4926 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"kube-api-access-rhnlc\" (UniqueName: \"kubernetes.io/projected/345b6795-6ff9-4d04-9128-4123b30d27da-kube-api-access-rhnlc\") pod \"keystone-db-create-6mrs7\" (UID: \"345b6795-6ff9-4d04-9128-4123b30d27da\") " pod="openstack/keystone-db-create-6mrs7" Nov 22 10:55:58 crc kubenswrapper[4926]: I1122 10:55:58.117948 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/345b6795-6ff9-4d04-9128-4123b30d27da-operator-scripts\") pod \"keystone-db-create-6mrs7\" (UID: \"345b6795-6ff9-4d04-9128-4123b30d27da\") " pod="openstack/keystone-db-create-6mrs7" Nov 22 10:55:58 crc kubenswrapper[4926]: I1122 10:55:58.117997 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8db0114b-7b33-4e75-88ba-cb05b049fa7d-operator-scripts\") pod \"keystone-9385-account-create-update-w2grf\" (UID: \"8db0114b-7b33-4e75-88ba-cb05b049fa7d\") " pod="openstack/keystone-9385-account-create-update-w2grf" Nov 22 10:55:58 crc kubenswrapper[4926]: I1122 10:55:58.118710 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8db0114b-7b33-4e75-88ba-cb05b049fa7d-operator-scripts\") pod \"keystone-9385-account-create-update-w2grf\" (UID: \"8db0114b-7b33-4e75-88ba-cb05b049fa7d\") " pod="openstack/keystone-9385-account-create-update-w2grf" Nov 22 10:55:58 crc kubenswrapper[4926]: I1122 10:55:58.119722 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/345b6795-6ff9-4d04-9128-4123b30d27da-operator-scripts\") pod \"keystone-db-create-6mrs7\" (UID: \"345b6795-6ff9-4d04-9128-4123b30d27da\") " pod="openstack/keystone-db-create-6mrs7" Nov 22 10:55:58 crc kubenswrapper[4926]: I1122 10:55:58.123518 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-db-create-6r9dw"] Nov 22 10:55:58 crc kubenswrapper[4926]: I1122 10:55:58.124920 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-create-6r9dw" Nov 22 10:55:58 crc kubenswrapper[4926]: I1122 10:55:58.131383 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-8467-account-create-update-psl9r"] Nov 22 10:55:58 crc kubenswrapper[4926]: I1122 10:55:58.134401 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-8467-account-create-update-psl9r" Nov 22 10:55:58 crc kubenswrapper[4926]: I1122 10:55:58.138216 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-db-secret" Nov 22 10:55:58 crc kubenswrapper[4926]: I1122 10:55:58.139161 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7nbtq\" (UniqueName: \"kubernetes.io/projected/8db0114b-7b33-4e75-88ba-cb05b049fa7d-kube-api-access-7nbtq\") pod \"keystone-9385-account-create-update-w2grf\" (UID: \"8db0114b-7b33-4e75-88ba-cb05b049fa7d\") " pod="openstack/keystone-9385-account-create-update-w2grf" Nov 22 10:55:58 crc kubenswrapper[4926]: I1122 10:55:58.143381 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rhnlc\" (UniqueName: \"kubernetes.io/projected/345b6795-6ff9-4d04-9128-4123b30d27da-kube-api-access-rhnlc\") pod \"keystone-db-create-6mrs7\" (UID: \"345b6795-6ff9-4d04-9128-4123b30d27da\") " pod="openstack/keystone-db-create-6mrs7" Nov 22 10:55:58 crc kubenswrapper[4926]: I1122 10:55:58.201516 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-create-6r9dw"] Nov 22 10:55:58 crc kubenswrapper[4926]: I1122 10:55:58.209023 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-8467-account-create-update-psl9r"] Nov 22 10:55:58 crc kubenswrapper[4926]: I1122 10:55:58.218922 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vbrwx\" (UniqueName: \"kubernetes.io/projected/a3ff5805-0887-4e05-98ca-d88590cbe337-kube-api-access-vbrwx\") pod \"placement-db-create-6r9dw\" (UID: \"a3ff5805-0887-4e05-98ca-d88590cbe337\") " pod="openstack/placement-db-create-6r9dw" Nov 22 10:55:58 crc kubenswrapper[4926]: I1122 10:55:58.218963 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a3ff5805-0887-4e05-98ca-d88590cbe337-operator-scripts\") pod \"placement-db-create-6r9dw\" (UID: \"a3ff5805-0887-4e05-98ca-d88590cbe337\") " pod="openstack/placement-db-create-6r9dw" Nov 22 10:55:58 crc kubenswrapper[4926]: I1122 10:55:58.219002 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0d5250a1-2aea-43b4-899a-d714d1dbf3ef-operator-scripts\") pod \"placement-8467-account-create-update-psl9r\" (UID: \"0d5250a1-2aea-43b4-899a-d714d1dbf3ef\") " pod="openstack/placement-8467-account-create-update-psl9r" Nov 22 10:55:58 crc kubenswrapper[4926]: I1122 10:55:58.219067 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cmbcj\" (UniqueName: \"kubernetes.io/projected/0d5250a1-2aea-43b4-899a-d714d1dbf3ef-kube-api-access-cmbcj\") pod \"placement-8467-account-create-update-psl9r\" (UID: \"0d5250a1-2aea-43b4-899a-d714d1dbf3ef\") " pod="openstack/placement-8467-account-create-update-psl9r" Nov 22 10:55:58 crc kubenswrapper[4926]: I1122 10:55:58.322085 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0d5250a1-2aea-43b4-899a-d714d1dbf3ef-operator-scripts\") pod \"placement-8467-account-create-update-psl9r\" (UID: \"0d5250a1-2aea-43b4-899a-d714d1dbf3ef\") " pod="openstack/placement-8467-account-create-update-psl9r" Nov 22 10:55:58 crc 
kubenswrapper[4926]: I1122 10:55:58.322239 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cmbcj\" (UniqueName: \"kubernetes.io/projected/0d5250a1-2aea-43b4-899a-d714d1dbf3ef-kube-api-access-cmbcj\") pod \"placement-8467-account-create-update-psl9r\" (UID: \"0d5250a1-2aea-43b4-899a-d714d1dbf3ef\") " pod="openstack/placement-8467-account-create-update-psl9r" Nov 22 10:55:58 crc kubenswrapper[4926]: I1122 10:55:58.322359 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vbrwx\" (UniqueName: \"kubernetes.io/projected/a3ff5805-0887-4e05-98ca-d88590cbe337-kube-api-access-vbrwx\") pod \"placement-db-create-6r9dw\" (UID: \"a3ff5805-0887-4e05-98ca-d88590cbe337\") " pod="openstack/placement-db-create-6r9dw" Nov 22 10:55:58 crc kubenswrapper[4926]: I1122 10:55:58.322386 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a3ff5805-0887-4e05-98ca-d88590cbe337-operator-scripts\") pod \"placement-db-create-6r9dw\" (UID: \"a3ff5805-0887-4e05-98ca-d88590cbe337\") " pod="openstack/placement-db-create-6r9dw" Nov 22 10:55:58 crc kubenswrapper[4926]: I1122 10:55:58.323351 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a3ff5805-0887-4e05-98ca-d88590cbe337-operator-scripts\") pod \"placement-db-create-6r9dw\" (UID: \"a3ff5805-0887-4e05-98ca-d88590cbe337\") " pod="openstack/placement-db-create-6r9dw" Nov 22 10:55:58 crc kubenswrapper[4926]: I1122 10:55:58.324094 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0d5250a1-2aea-43b4-899a-d714d1dbf3ef-operator-scripts\") pod \"placement-8467-account-create-update-psl9r\" (UID: \"0d5250a1-2aea-43b4-899a-d714d1dbf3ef\") " pod="openstack/placement-8467-account-create-update-psl9r" Nov 22 10:55:58 crc kubenswrapper[4926]: I1122 10:55:58.325457 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-db-create-r4wxx"] Nov 22 10:55:58 crc kubenswrapper[4926]: I1122 10:55:58.327228 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-create-r4wxx" Nov 22 10:55:58 crc kubenswrapper[4926]: I1122 10:55:58.331720 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-create-r4wxx"] Nov 22 10:55:58 crc kubenswrapper[4926]: I1122 10:55:58.341186 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-db-create-6mrs7" Nov 22 10:55:58 crc kubenswrapper[4926]: I1122 10:55:58.342074 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cmbcj\" (UniqueName: \"kubernetes.io/projected/0d5250a1-2aea-43b4-899a-d714d1dbf3ef-kube-api-access-cmbcj\") pod \"placement-8467-account-create-update-psl9r\" (UID: \"0d5250a1-2aea-43b4-899a-d714d1dbf3ef\") " pod="openstack/placement-8467-account-create-update-psl9r" Nov 22 10:55:58 crc kubenswrapper[4926]: I1122 10:55:58.343235 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vbrwx\" (UniqueName: \"kubernetes.io/projected/a3ff5805-0887-4e05-98ca-d88590cbe337-kube-api-access-vbrwx\") pod \"placement-db-create-6r9dw\" (UID: \"a3ff5805-0887-4e05-98ca-d88590cbe337\") " pod="openstack/placement-db-create-6r9dw" Nov 22 10:55:58 crc kubenswrapper[4926]: I1122 10:55:58.354036 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-9385-account-create-update-w2grf" Nov 22 10:55:58 crc kubenswrapper[4926]: I1122 10:55:58.427860 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/cf5bb2c6-b5be-4094-afe5-380401435ebd-operator-scripts\") pod \"glance-db-create-r4wxx\" (UID: \"cf5bb2c6-b5be-4094-afe5-380401435ebd\") " pod="openstack/glance-db-create-r4wxx" Nov 22 10:55:58 crc kubenswrapper[4926]: I1122 10:55:58.428036 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qj8q6\" (UniqueName: \"kubernetes.io/projected/cf5bb2c6-b5be-4094-afe5-380401435ebd-kube-api-access-qj8q6\") pod \"glance-db-create-r4wxx\" (UID: \"cf5bb2c6-b5be-4094-afe5-380401435ebd\") " pod="openstack/glance-db-create-r4wxx" Nov 22 10:55:58 crc kubenswrapper[4926]: I1122 10:55:58.439422 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-0a97-account-create-update-sx5t4"] Nov 22 10:55:58 crc kubenswrapper[4926]: I1122 10:55:58.441470 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-0a97-account-create-update-sx5t4" Nov 22 10:55:58 crc kubenswrapper[4926]: I1122 10:55:58.448060 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-db-secret" Nov 22 10:55:58 crc kubenswrapper[4926]: I1122 10:55:58.464092 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-0a97-account-create-update-sx5t4"] Nov 22 10:55:58 crc kubenswrapper[4926]: I1122 10:55:58.496333 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-create-6r9dw" Nov 22 10:55:58 crc kubenswrapper[4926]: I1122 10:55:58.517858 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-8467-account-create-update-psl9r" Nov 22 10:55:58 crc kubenswrapper[4926]: I1122 10:55:58.529675 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/5c28a203-3548-47cc-891f-050d9a3fd7c4-operator-scripts\") pod \"glance-0a97-account-create-update-sx5t4\" (UID: \"5c28a203-3548-47cc-891f-050d9a3fd7c4\") " pod="openstack/glance-0a97-account-create-update-sx5t4" Nov 22 10:55:58 crc kubenswrapper[4926]: I1122 10:55:58.529750 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qj8q6\" (UniqueName: \"kubernetes.io/projected/cf5bb2c6-b5be-4094-afe5-380401435ebd-kube-api-access-qj8q6\") pod \"glance-db-create-r4wxx\" (UID: \"cf5bb2c6-b5be-4094-afe5-380401435ebd\") " pod="openstack/glance-db-create-r4wxx" Nov 22 10:55:58 crc kubenswrapper[4926]: I1122 10:55:58.529837 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/cf5bb2c6-b5be-4094-afe5-380401435ebd-operator-scripts\") pod \"glance-db-create-r4wxx\" (UID: \"cf5bb2c6-b5be-4094-afe5-380401435ebd\") " pod="openstack/glance-db-create-r4wxx" Nov 22 10:55:58 crc kubenswrapper[4926]: I1122 10:55:58.529969 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-78d7j\" (UniqueName: \"kubernetes.io/projected/5c28a203-3548-47cc-891f-050d9a3fd7c4-kube-api-access-78d7j\") pod \"glance-0a97-account-create-update-sx5t4\" (UID: \"5c28a203-3548-47cc-891f-050d9a3fd7c4\") " pod="openstack/glance-0a97-account-create-update-sx5t4" Nov 22 10:55:58 crc kubenswrapper[4926]: I1122 10:55:58.531532 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/cf5bb2c6-b5be-4094-afe5-380401435ebd-operator-scripts\") pod \"glance-db-create-r4wxx\" (UID: \"cf5bb2c6-b5be-4094-afe5-380401435ebd\") " pod="openstack/glance-db-create-r4wxx" Nov 22 10:55:58 crc kubenswrapper[4926]: I1122 10:55:58.548741 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qj8q6\" (UniqueName: \"kubernetes.io/projected/cf5bb2c6-b5be-4094-afe5-380401435ebd-kube-api-access-qj8q6\") pod \"glance-db-create-r4wxx\" (UID: \"cf5bb2c6-b5be-4094-afe5-380401435ebd\") " pod="openstack/glance-db-create-r4wxx" Nov 22 10:55:58 crc kubenswrapper[4926]: I1122 10:55:58.609314 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1c468f29-520a-4129-9ad6-8661f644a1a9" path="/var/lib/kubelet/pods/1c468f29-520a-4129-9ad6-8661f644a1a9/volumes" Nov 22 10:55:58 crc kubenswrapper[4926]: I1122 10:55:58.631094 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-78d7j\" (UniqueName: \"kubernetes.io/projected/5c28a203-3548-47cc-891f-050d9a3fd7c4-kube-api-access-78d7j\") pod \"glance-0a97-account-create-update-sx5t4\" (UID: \"5c28a203-3548-47cc-891f-050d9a3fd7c4\") " pod="openstack/glance-0a97-account-create-update-sx5t4" Nov 22 10:55:58 crc kubenswrapper[4926]: I1122 10:55:58.631173 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/5c28a203-3548-47cc-891f-050d9a3fd7c4-operator-scripts\") pod \"glance-0a97-account-create-update-sx5t4\" (UID: \"5c28a203-3548-47cc-891f-050d9a3fd7c4\") " 
pod="openstack/glance-0a97-account-create-update-sx5t4" Nov 22 10:55:58 crc kubenswrapper[4926]: I1122 10:55:58.631945 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/5c28a203-3548-47cc-891f-050d9a3fd7c4-operator-scripts\") pod \"glance-0a97-account-create-update-sx5t4\" (UID: \"5c28a203-3548-47cc-891f-050d9a3fd7c4\") " pod="openstack/glance-0a97-account-create-update-sx5t4" Nov 22 10:55:58 crc kubenswrapper[4926]: I1122 10:55:58.655421 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-create-r4wxx" Nov 22 10:55:58 crc kubenswrapper[4926]: I1122 10:55:58.670457 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-78d7j\" (UniqueName: \"kubernetes.io/projected/5c28a203-3548-47cc-891f-050d9a3fd7c4-kube-api-access-78d7j\") pod \"glance-0a97-account-create-update-sx5t4\" (UID: \"5c28a203-3548-47cc-891f-050d9a3fd7c4\") " pod="openstack/glance-0a97-account-create-update-sx5t4" Nov 22 10:55:58 crc kubenswrapper[4926]: I1122 10:55:58.925764 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-create-6mrs7"] Nov 22 10:55:58 crc kubenswrapper[4926]: I1122 10:55:58.930235 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-0a97-account-create-update-sx5t4" Nov 22 10:55:58 crc kubenswrapper[4926]: I1122 10:55:58.933074 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-9385-account-create-update-w2grf"] Nov 22 10:55:58 crc kubenswrapper[4926]: I1122 10:55:58.951466 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-6mrs7" event={"ID":"345b6795-6ff9-4d04-9128-4123b30d27da","Type":"ContainerStarted","Data":"30521ae21d2890e81dc2b22f308caad858b483aab3e1bb481dea3f954dbcff23"} Nov 22 10:55:58 crc kubenswrapper[4926]: I1122 10:55:58.953039 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-9385-account-create-update-w2grf" event={"ID":"8db0114b-7b33-4e75-88ba-cb05b049fa7d","Type":"ContainerStarted","Data":"59342e4a020695e80e125c1fd9d8b0a42b87c1f4d5197770ca79818b0f1f4cd6"} Nov 22 10:55:58 crc kubenswrapper[4926]: I1122 10:55:58.954962 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-8cc7fc4dc-82m4x" event={"ID":"935dca86-6400-44ad-9976-770f56ea6a6b","Type":"ContainerStarted","Data":"ba699efc11ac7cfa31fc5edbc853bfd84060e7a89bd0f3897551ceef0b8c516d"} Nov 22 10:55:58 crc kubenswrapper[4926]: I1122 10:55:58.955289 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-8cc7fc4dc-82m4x" Nov 22 10:55:58 crc kubenswrapper[4926]: I1122 10:55:58.956689 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"d90df493-f9a0-4774-bd2e-6b96bbfebf31","Type":"ContainerStarted","Data":"cdd6c7e2de9e46e9a5cb8acd267a632885bfdafc7d63f3e81983f8cc97524e1c"} Nov 22 10:55:58 crc kubenswrapper[4926]: I1122 10:55:58.956718 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"d90df493-f9a0-4774-bd2e-6b96bbfebf31","Type":"ContainerStarted","Data":"0efc3dd97234417f154bddb2ebaf7bb787cfd3b22547e326d29a54bb4f4a528d"} Nov 22 10:55:58 crc kubenswrapper[4926]: I1122 10:55:58.956819 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-northd-0" Nov 22 10:55:58 crc kubenswrapper[4926]: I1122 10:55:58.959919 4926 
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-b8fbc5445-m7sk6" event={"ID":"37642f49-f656-4aeb-a359-2c32f4cf7919","Type":"ContainerStarted","Data":"4dff75ddd520308ecb18884a161069763659fcb0c42f8df3481c373a2a0addf3"} Nov 22 10:55:58 crc kubenswrapper[4926]: I1122 10:55:58.983443 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-8cc7fc4dc-82m4x" podStartSLOduration=5.983419546 podStartE2EDuration="5.983419546s" podCreationTimestamp="2025-11-22 10:55:53 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 10:55:58.975353894 +0000 UTC m=+979.276959191" watchObservedRunningTime="2025-11-22 10:55:58.983419546 +0000 UTC m=+979.285024833" Nov 22 10:55:59 crc kubenswrapper[4926]: I1122 10:55:59.000120 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-b8fbc5445-m7sk6" podStartSLOduration=6.000101375 podStartE2EDuration="6.000101375s" podCreationTimestamp="2025-11-22 10:55:53 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 10:55:58.995918315 +0000 UTC m=+979.297523602" watchObservedRunningTime="2025-11-22 10:55:59.000101375 +0000 UTC m=+979.301706672" Nov 22 10:55:59 crc kubenswrapper[4926]: I1122 10:55:59.026740 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-northd-0" podStartSLOduration=2.838630882 podStartE2EDuration="6.026724351s" podCreationTimestamp="2025-11-22 10:55:53 +0000 UTC" firstStartedPulling="2025-11-22 10:55:54.448483133 +0000 UTC m=+974.750088420" lastFinishedPulling="2025-11-22 10:55:57.636576602 +0000 UTC m=+977.938181889" observedRunningTime="2025-11-22 10:55:59.021319216 +0000 UTC m=+979.322924513" watchObservedRunningTime="2025-11-22 10:55:59.026724351 +0000 UTC m=+979.328329628" Nov 22 10:55:59 crc kubenswrapper[4926]: I1122 10:55:59.124275 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-create-6r9dw"] Nov 22 10:55:59 crc kubenswrapper[4926]: I1122 10:55:59.138278 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-8467-account-create-update-psl9r"] Nov 22 10:55:59 crc kubenswrapper[4926]: I1122 10:55:59.197126 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-create-r4wxx"] Nov 22 10:55:59 crc kubenswrapper[4926]: W1122 10:55:59.211733 4926 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podcf5bb2c6_b5be_4094_afe5_380401435ebd.slice/crio-6ed14d6c7f4d10cbc5d86ca731da854e2f34f3fa90b989df3b4d256336a6dfe0 WatchSource:0}: Error finding container 6ed14d6c7f4d10cbc5d86ca731da854e2f34f3fa90b989df3b4d256336a6dfe0: Status 404 returned error can't find the container with id 6ed14d6c7f4d10cbc5d86ca731da854e2f34f3fa90b989df3b4d256336a6dfe0 Nov 22 10:55:59 crc kubenswrapper[4926]: I1122 10:55:59.242492 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/2d0b3ca4-531c-4c3e-9af7-f5d2b65c7251-etc-swift\") pod \"swift-storage-0\" (UID: \"2d0b3ca4-531c-4c3e-9af7-f5d2b65c7251\") " pod="openstack/swift-storage-0" Nov 22 10:55:59 crc kubenswrapper[4926]: E1122 10:55:59.242699 4926 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found 
Nov 22 10:55:59 crc kubenswrapper[4926]: E1122 10:55:59.242727 4926 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Nov 22 10:55:59 crc kubenswrapper[4926]: E1122 10:55:59.242790 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/2d0b3ca4-531c-4c3e-9af7-f5d2b65c7251-etc-swift podName:2d0b3ca4-531c-4c3e-9af7-f5d2b65c7251 nodeName:}" failed. No retries permitted until 2025-11-22 10:56:07.242770665 +0000 UTC m=+987.544375952 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/2d0b3ca4-531c-4c3e-9af7-f5d2b65c7251-etc-swift") pod "swift-storage-0" (UID: "2d0b3ca4-531c-4c3e-9af7-f5d2b65c7251") : configmap "swift-ring-files" not found Nov 22 10:55:59 crc kubenswrapper[4926]: I1122 10:55:59.304608 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/openstack-cell1-galera-0" Nov 22 10:55:59 crc kubenswrapper[4926]: I1122 10:55:59.396409 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/openstack-cell1-galera-0" Nov 22 10:55:59 crc kubenswrapper[4926]: I1122 10:55:59.503744 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-0a97-account-create-update-sx5t4"] Nov 22 10:55:59 crc kubenswrapper[4926]: W1122 10:55:59.514961 4926 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod5c28a203_3548_47cc_891f_050d9a3fd7c4.slice/crio-df6dba5811391751194f2b8c77323bb8c9d4538689313e52238a7f7b03458df1 WatchSource:0}: Error finding container df6dba5811391751194f2b8c77323bb8c9d4538689313e52238a7f7b03458df1: Status 404 returned error can't find the container with id df6dba5811391751194f2b8c77323bb8c9d4538689313e52238a7f7b03458df1 Nov 22 10:55:59 crc kubenswrapper[4926]: I1122 10:55:59.572126 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-b8fbc5445-m7sk6" Nov 22 10:55:59 crc kubenswrapper[4926]: I1122 10:55:59.982178 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-6mrs7" event={"ID":"345b6795-6ff9-4d04-9128-4123b30d27da","Type":"ContainerStarted","Data":"f54f0f19b2b290a03232140abf90a1715cd4fa3ac2202c721ff4f8ca53884304"} Nov 22 10:55:59 crc kubenswrapper[4926]: I1122 10:55:59.984436 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-0a97-account-create-update-sx5t4" event={"ID":"5c28a203-3548-47cc-891f-050d9a3fd7c4","Type":"ContainerStarted","Data":"df6dba5811391751194f2b8c77323bb8c9d4538689313e52238a7f7b03458df1"} Nov 22 10:55:59 crc kubenswrapper[4926]: I1122 10:55:59.989257 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-9385-account-create-update-w2grf" event={"ID":"8db0114b-7b33-4e75-88ba-cb05b049fa7d","Type":"ContainerStarted","Data":"d1f44a6680f094dbfc9cff4d278be84da0f7900cbe78cd830582f4592d2f64b8"} Nov 22 10:55:59 crc kubenswrapper[4926]: I1122 10:55:59.998850 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-r4wxx" event={"ID":"cf5bb2c6-b5be-4094-afe5-380401435ebd","Type":"ContainerStarted","Data":"6c8d5010b00fd6e878882d30f780ec04054d2b4710f7a3acb2ed6c8ae3db3922"} Nov 22 10:55:59 crc kubenswrapper[4926]: I1122 10:55:59.998919 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-r4wxx" 
event={"ID":"cf5bb2c6-b5be-4094-afe5-380401435ebd","Type":"ContainerStarted","Data":"6ed14d6c7f4d10cbc5d86ca731da854e2f34f3fa90b989df3b4d256336a6dfe0"} Nov 22 10:56:00 crc kubenswrapper[4926]: I1122 10:56:00.001695 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-8467-account-create-update-psl9r" event={"ID":"0d5250a1-2aea-43b4-899a-d714d1dbf3ef","Type":"ContainerStarted","Data":"ce24942aa8ce2c5efc5d213d15ff6f145882d6d60592651748244592677777ac"} Nov 22 10:56:00 crc kubenswrapper[4926]: I1122 10:56:00.001724 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-8467-account-create-update-psl9r" event={"ID":"0d5250a1-2aea-43b4-899a-d714d1dbf3ef","Type":"ContainerStarted","Data":"b2773cf9b24855c28978a99cb2ad007f0909c015e74de87e0bec9a2f0b47062b"} Nov 22 10:56:00 crc kubenswrapper[4926]: I1122 10:56:00.003945 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-6r9dw" event={"ID":"a3ff5805-0887-4e05-98ca-d88590cbe337","Type":"ContainerStarted","Data":"5f4feeeb4ce4a0d4fae2c08fb2a8ea4b18e841efaef2f68ed65614e2cc32f028"} Nov 22 10:56:00 crc kubenswrapper[4926]: I1122 10:56:00.004002 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-6r9dw" event={"ID":"a3ff5805-0887-4e05-98ca-d88590cbe337","Type":"ContainerStarted","Data":"e18780e937541eb0ca6221d32267d8d6d40f647798fbb93bf8eb66da712bd614"} Nov 22 10:56:00 crc kubenswrapper[4926]: I1122 10:56:00.008498 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-db-create-6mrs7" podStartSLOduration=3.008423425 podStartE2EDuration="3.008423425s" podCreationTimestamp="2025-11-22 10:55:57 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 10:55:59.999023404 +0000 UTC m=+980.300628691" watchObservedRunningTime="2025-11-22 10:56:00.008423425 +0000 UTC m=+980.310028722" Nov 22 10:56:00 crc kubenswrapper[4926]: I1122 10:56:00.012103 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-9385-account-create-update-w2grf" podStartSLOduration=3.01208283 podStartE2EDuration="3.01208283s" podCreationTimestamp="2025-11-22 10:55:57 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 10:56:00.011714499 +0000 UTC m=+980.313319786" watchObservedRunningTime="2025-11-22 10:56:00.01208283 +0000 UTC m=+980.313688137" Nov 22 10:56:00 crc kubenswrapper[4926]: I1122 10:56:00.032571 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/placement-db-create-6r9dw" podStartSLOduration=2.032548789 podStartE2EDuration="2.032548789s" podCreationTimestamp="2025-11-22 10:55:58 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 10:56:00.026175725 +0000 UTC m=+980.327781022" watchObservedRunningTime="2025-11-22 10:56:00.032548789 +0000 UTC m=+980.334154076" Nov 22 10:56:00 crc kubenswrapper[4926]: I1122 10:56:00.198031 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/kube-state-metrics-0" Nov 22 10:56:01 crc kubenswrapper[4926]: I1122 10:56:01.033801 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-db-create-r4wxx" podStartSLOduration=3.033784064 
podStartE2EDuration="3.033784064s" podCreationTimestamp="2025-11-22 10:55:58 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 10:56:01.027151203 +0000 UTC m=+981.328756490" watchObservedRunningTime="2025-11-22 10:56:01.033784064 +0000 UTC m=+981.335389341" Nov 22 10:56:01 crc kubenswrapper[4926]: I1122 10:56:01.053435 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/placement-8467-account-create-update-psl9r" podStartSLOduration=3.053403768 podStartE2EDuration="3.053403768s" podCreationTimestamp="2025-11-22 10:55:58 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 10:56:01.047066406 +0000 UTC m=+981.348671713" watchObservedRunningTime="2025-11-22 10:56:01.053403768 +0000 UTC m=+981.355009055" Nov 22 10:56:02 crc kubenswrapper[4926]: I1122 10:56:02.022715 4926 generic.go:334] "Generic (PLEG): container finished" podID="345b6795-6ff9-4d04-9128-4123b30d27da" containerID="f54f0f19b2b290a03232140abf90a1715cd4fa3ac2202c721ff4f8ca53884304" exitCode=0 Nov 22 10:56:02 crc kubenswrapper[4926]: I1122 10:56:02.022830 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-6mrs7" event={"ID":"345b6795-6ff9-4d04-9128-4123b30d27da","Type":"ContainerDied","Data":"f54f0f19b2b290a03232140abf90a1715cd4fa3ac2202c721ff4f8ca53884304"} Nov 22 10:56:02 crc kubenswrapper[4926]: I1122 10:56:02.025305 4926 generic.go:334] "Generic (PLEG): container finished" podID="cf5bb2c6-b5be-4094-afe5-380401435ebd" containerID="6c8d5010b00fd6e878882d30f780ec04054d2b4710f7a3acb2ed6c8ae3db3922" exitCode=0 Nov 22 10:56:02 crc kubenswrapper[4926]: I1122 10:56:02.025362 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-r4wxx" event={"ID":"cf5bb2c6-b5be-4094-afe5-380401435ebd","Type":"ContainerDied","Data":"6c8d5010b00fd6e878882d30f780ec04054d2b4710f7a3acb2ed6c8ae3db3922"} Nov 22 10:56:02 crc kubenswrapper[4926]: I1122 10:56:02.027124 4926 generic.go:334] "Generic (PLEG): container finished" podID="a3ff5805-0887-4e05-98ca-d88590cbe337" containerID="5f4feeeb4ce4a0d4fae2c08fb2a8ea4b18e841efaef2f68ed65614e2cc32f028" exitCode=0 Nov 22 10:56:02 crc kubenswrapper[4926]: I1122 10:56:02.027174 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-6r9dw" event={"ID":"a3ff5805-0887-4e05-98ca-d88590cbe337","Type":"ContainerDied","Data":"5f4feeeb4ce4a0d4fae2c08fb2a8ea4b18e841efaef2f68ed65614e2cc32f028"} Nov 22 10:56:03 crc kubenswrapper[4926]: I1122 10:56:03.395482 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-create-6r9dw" Nov 22 10:56:03 crc kubenswrapper[4926]: I1122 10:56:03.487280 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-create-r4wxx" Nov 22 10:56:03 crc kubenswrapper[4926]: I1122 10:56:03.495189 4926 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-db-create-6mrs7" Nov 22 10:56:03 crc kubenswrapper[4926]: I1122 10:56:03.550314 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a3ff5805-0887-4e05-98ca-d88590cbe337-operator-scripts\") pod \"a3ff5805-0887-4e05-98ca-d88590cbe337\" (UID: \"a3ff5805-0887-4e05-98ca-d88590cbe337\") " Nov 22 10:56:03 crc kubenswrapper[4926]: I1122 10:56:03.550493 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vbrwx\" (UniqueName: \"kubernetes.io/projected/a3ff5805-0887-4e05-98ca-d88590cbe337-kube-api-access-vbrwx\") pod \"a3ff5805-0887-4e05-98ca-d88590cbe337\" (UID: \"a3ff5805-0887-4e05-98ca-d88590cbe337\") " Nov 22 10:56:03 crc kubenswrapper[4926]: I1122 10:56:03.551756 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a3ff5805-0887-4e05-98ca-d88590cbe337-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "a3ff5805-0887-4e05-98ca-d88590cbe337" (UID: "a3ff5805-0887-4e05-98ca-d88590cbe337"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:56:03 crc kubenswrapper[4926]: I1122 10:56:03.555752 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a3ff5805-0887-4e05-98ca-d88590cbe337-kube-api-access-vbrwx" (OuterVolumeSpecName: "kube-api-access-vbrwx") pod "a3ff5805-0887-4e05-98ca-d88590cbe337" (UID: "a3ff5805-0887-4e05-98ca-d88590cbe337"). InnerVolumeSpecName "kube-api-access-vbrwx". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:56:03 crc kubenswrapper[4926]: I1122 10:56:03.652634 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/cf5bb2c6-b5be-4094-afe5-380401435ebd-operator-scripts\") pod \"cf5bb2c6-b5be-4094-afe5-380401435ebd\" (UID: \"cf5bb2c6-b5be-4094-afe5-380401435ebd\") " Nov 22 10:56:03 crc kubenswrapper[4926]: I1122 10:56:03.652791 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rhnlc\" (UniqueName: \"kubernetes.io/projected/345b6795-6ff9-4d04-9128-4123b30d27da-kube-api-access-rhnlc\") pod \"345b6795-6ff9-4d04-9128-4123b30d27da\" (UID: \"345b6795-6ff9-4d04-9128-4123b30d27da\") " Nov 22 10:56:03 crc kubenswrapper[4926]: I1122 10:56:03.653028 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qj8q6\" (UniqueName: \"kubernetes.io/projected/cf5bb2c6-b5be-4094-afe5-380401435ebd-kube-api-access-qj8q6\") pod \"cf5bb2c6-b5be-4094-afe5-380401435ebd\" (UID: \"cf5bb2c6-b5be-4094-afe5-380401435ebd\") " Nov 22 10:56:03 crc kubenswrapper[4926]: I1122 10:56:03.653113 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/345b6795-6ff9-4d04-9128-4123b30d27da-operator-scripts\") pod \"345b6795-6ff9-4d04-9128-4123b30d27da\" (UID: \"345b6795-6ff9-4d04-9128-4123b30d27da\") " Nov 22 10:56:03 crc kubenswrapper[4926]: I1122 10:56:03.653551 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/cf5bb2c6-b5be-4094-afe5-380401435ebd-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "cf5bb2c6-b5be-4094-afe5-380401435ebd" (UID: "cf5bb2c6-b5be-4094-afe5-380401435ebd"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:56:03 crc kubenswrapper[4926]: I1122 10:56:03.654638 4926 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/cf5bb2c6-b5be-4094-afe5-380401435ebd-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 22 10:56:03 crc kubenswrapper[4926]: I1122 10:56:03.654685 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vbrwx\" (UniqueName: \"kubernetes.io/projected/a3ff5805-0887-4e05-98ca-d88590cbe337-kube-api-access-vbrwx\") on node \"crc\" DevicePath \"\"" Nov 22 10:56:03 crc kubenswrapper[4926]: I1122 10:56:03.654699 4926 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a3ff5805-0887-4e05-98ca-d88590cbe337-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 22 10:56:03 crc kubenswrapper[4926]: I1122 10:56:03.654843 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/345b6795-6ff9-4d04-9128-4123b30d27da-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "345b6795-6ff9-4d04-9128-4123b30d27da" (UID: "345b6795-6ff9-4d04-9128-4123b30d27da"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:56:03 crc kubenswrapper[4926]: I1122 10:56:03.656748 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/345b6795-6ff9-4d04-9128-4123b30d27da-kube-api-access-rhnlc" (OuterVolumeSpecName: "kube-api-access-rhnlc") pod "345b6795-6ff9-4d04-9128-4123b30d27da" (UID: "345b6795-6ff9-4d04-9128-4123b30d27da"). InnerVolumeSpecName "kube-api-access-rhnlc". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:56:03 crc kubenswrapper[4926]: I1122 10:56:03.657172 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cf5bb2c6-b5be-4094-afe5-380401435ebd-kube-api-access-qj8q6" (OuterVolumeSpecName: "kube-api-access-qj8q6") pod "cf5bb2c6-b5be-4094-afe5-380401435ebd" (UID: "cf5bb2c6-b5be-4094-afe5-380401435ebd"). InnerVolumeSpecName "kube-api-access-qj8q6". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:56:03 crc kubenswrapper[4926]: I1122 10:56:03.756372 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rhnlc\" (UniqueName: \"kubernetes.io/projected/345b6795-6ff9-4d04-9128-4123b30d27da-kube-api-access-rhnlc\") on node \"crc\" DevicePath \"\"" Nov 22 10:56:03 crc kubenswrapper[4926]: I1122 10:56:03.756417 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qj8q6\" (UniqueName: \"kubernetes.io/projected/cf5bb2c6-b5be-4094-afe5-380401435ebd-kube-api-access-qj8q6\") on node \"crc\" DevicePath \"\"" Nov 22 10:56:03 crc kubenswrapper[4926]: I1122 10:56:03.756430 4926 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/345b6795-6ff9-4d04-9128-4123b30d27da-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 22 10:56:04 crc kubenswrapper[4926]: I1122 10:56:04.048815 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-r4wxx" event={"ID":"cf5bb2c6-b5be-4094-afe5-380401435ebd","Type":"ContainerDied","Data":"6ed14d6c7f4d10cbc5d86ca731da854e2f34f3fa90b989df3b4d256336a6dfe0"} Nov 22 10:56:04 crc kubenswrapper[4926]: I1122 10:56:04.049421 4926 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="6ed14d6c7f4d10cbc5d86ca731da854e2f34f3fa90b989df3b4d256336a6dfe0" Nov 22 10:56:04 crc kubenswrapper[4926]: I1122 10:56:04.048831 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-create-r4wxx" Nov 22 10:56:04 crc kubenswrapper[4926]: I1122 10:56:04.051186 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-create-6r9dw" Nov 22 10:56:04 crc kubenswrapper[4926]: I1122 10:56:04.051179 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-6r9dw" event={"ID":"a3ff5805-0887-4e05-98ca-d88590cbe337","Type":"ContainerDied","Data":"e18780e937541eb0ca6221d32267d8d6d40f647798fbb93bf8eb66da712bd614"} Nov 22 10:56:04 crc kubenswrapper[4926]: I1122 10:56:04.051526 4926 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e18780e937541eb0ca6221d32267d8d6d40f647798fbb93bf8eb66da712bd614" Nov 22 10:56:04 crc kubenswrapper[4926]: I1122 10:56:04.067382 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-6mrs7" event={"ID":"345b6795-6ff9-4d04-9128-4123b30d27da","Type":"ContainerDied","Data":"30521ae21d2890e81dc2b22f308caad858b483aab3e1bb481dea3f954dbcff23"} Nov 22 10:56:04 crc kubenswrapper[4926]: I1122 10:56:04.067693 4926 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="30521ae21d2890e81dc2b22f308caad858b483aab3e1bb481dea3f954dbcff23" Nov 22 10:56:04 crc kubenswrapper[4926]: I1122 10:56:04.067452 4926 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-db-create-6mrs7" Nov 22 10:56:04 crc kubenswrapper[4926]: I1122 10:56:04.573249 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-b8fbc5445-m7sk6" Nov 22 10:56:04 crc kubenswrapper[4926]: I1122 10:56:04.626789 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-8cc7fc4dc-82m4x"] Nov 22 10:56:04 crc kubenswrapper[4926]: I1122 10:56:04.627174 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-8cc7fc4dc-82m4x" podUID="935dca86-6400-44ad-9976-770f56ea6a6b" containerName="dnsmasq-dns" containerID="cri-o://ba699efc11ac7cfa31fc5edbc853bfd84060e7a89bd0f3897551ceef0b8c516d" gracePeriod=10 Nov 22 10:56:04 crc kubenswrapper[4926]: I1122 10:56:04.628058 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-8cc7fc4dc-82m4x" Nov 22 10:56:05 crc kubenswrapper[4926]: I1122 10:56:05.072675 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-8cc7fc4dc-82m4x" podUID="935dca86-6400-44ad-9976-770f56ea6a6b" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.113:5353: connect: connection refused" Nov 22 10:56:05 crc kubenswrapper[4926]: I1122 10:56:05.076301 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-0a97-account-create-update-sx5t4" event={"ID":"5c28a203-3548-47cc-891f-050d9a3fd7c4","Type":"ContainerStarted","Data":"0f90830d6cc4699c2d70ae9d060b4973a427a356e456c28f9cdbf4cd41becf3c"} Nov 22 10:56:06 crc kubenswrapper[4926]: I1122 10:56:06.086068 4926 generic.go:334] "Generic (PLEG): container finished" podID="8db0114b-7b33-4e75-88ba-cb05b049fa7d" containerID="d1f44a6680f094dbfc9cff4d278be84da0f7900cbe78cd830582f4592d2f64b8" exitCode=0 Nov 22 10:56:06 crc kubenswrapper[4926]: I1122 10:56:06.086422 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-9385-account-create-update-w2grf" event={"ID":"8db0114b-7b33-4e75-88ba-cb05b049fa7d","Type":"ContainerDied","Data":"d1f44a6680f094dbfc9cff4d278be84da0f7900cbe78cd830582f4592d2f64b8"} Nov 22 10:56:06 crc kubenswrapper[4926]: I1122 10:56:06.092004 4926 generic.go:334] "Generic (PLEG): container finished" podID="935dca86-6400-44ad-9976-770f56ea6a6b" containerID="ba699efc11ac7cfa31fc5edbc853bfd84060e7a89bd0f3897551ceef0b8c516d" exitCode=0 Nov 22 10:56:06 crc kubenswrapper[4926]: I1122 10:56:06.092080 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-8cc7fc4dc-82m4x" event={"ID":"935dca86-6400-44ad-9976-770f56ea6a6b","Type":"ContainerDied","Data":"ba699efc11ac7cfa31fc5edbc853bfd84060e7a89bd0f3897551ceef0b8c516d"} Nov 22 10:56:06 crc kubenswrapper[4926]: I1122 10:56:06.094144 4926 generic.go:334] "Generic (PLEG): container finished" podID="0d5250a1-2aea-43b4-899a-d714d1dbf3ef" containerID="ce24942aa8ce2c5efc5d213d15ff6f145882d6d60592651748244592677777ac" exitCode=0 Nov 22 10:56:06 crc kubenswrapper[4926]: I1122 10:56:06.094211 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-8467-account-create-update-psl9r" event={"ID":"0d5250a1-2aea-43b4-899a-d714d1dbf3ef","Type":"ContainerDied","Data":"ce24942aa8ce2c5efc5d213d15ff6f145882d6d60592651748244592677777ac"} Nov 22 10:56:06 crc kubenswrapper[4926]: I1122 10:56:06.098598 4926 generic.go:334] "Generic (PLEG): container finished" podID="5c28a203-3548-47cc-891f-050d9a3fd7c4" 
containerID="0f90830d6cc4699c2d70ae9d060b4973a427a356e456c28f9cdbf4cd41becf3c" exitCode=0 Nov 22 10:56:06 crc kubenswrapper[4926]: I1122 10:56:06.098621 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-0a97-account-create-update-sx5t4" event={"ID":"5c28a203-3548-47cc-891f-050d9a3fd7c4","Type":"ContainerDied","Data":"0f90830d6cc4699c2d70ae9d060b4973a427a356e456c28f9cdbf4cd41becf3c"} Nov 22 10:56:06 crc kubenswrapper[4926]: I1122 10:56:06.188093 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-8cc7fc4dc-82m4x" Nov 22 10:56:06 crc kubenswrapper[4926]: I1122 10:56:06.215081 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/935dca86-6400-44ad-9976-770f56ea6a6b-config\") pod \"935dca86-6400-44ad-9976-770f56ea6a6b\" (UID: \"935dca86-6400-44ad-9976-770f56ea6a6b\") " Nov 22 10:56:06 crc kubenswrapper[4926]: I1122 10:56:06.215147 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-h4v7s\" (UniqueName: \"kubernetes.io/projected/935dca86-6400-44ad-9976-770f56ea6a6b-kube-api-access-h4v7s\") pod \"935dca86-6400-44ad-9976-770f56ea6a6b\" (UID: \"935dca86-6400-44ad-9976-770f56ea6a6b\") " Nov 22 10:56:06 crc kubenswrapper[4926]: I1122 10:56:06.215238 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/935dca86-6400-44ad-9976-770f56ea6a6b-dns-svc\") pod \"935dca86-6400-44ad-9976-770f56ea6a6b\" (UID: \"935dca86-6400-44ad-9976-770f56ea6a6b\") " Nov 22 10:56:06 crc kubenswrapper[4926]: I1122 10:56:06.215270 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/935dca86-6400-44ad-9976-770f56ea6a6b-ovsdbserver-sb\") pod \"935dca86-6400-44ad-9976-770f56ea6a6b\" (UID: \"935dca86-6400-44ad-9976-770f56ea6a6b\") " Nov 22 10:56:06 crc kubenswrapper[4926]: I1122 10:56:06.220724 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/935dca86-6400-44ad-9976-770f56ea6a6b-kube-api-access-h4v7s" (OuterVolumeSpecName: "kube-api-access-h4v7s") pod "935dca86-6400-44ad-9976-770f56ea6a6b" (UID: "935dca86-6400-44ad-9976-770f56ea6a6b"). InnerVolumeSpecName "kube-api-access-h4v7s". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:56:06 crc kubenswrapper[4926]: I1122 10:56:06.258475 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/935dca86-6400-44ad-9976-770f56ea6a6b-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "935dca86-6400-44ad-9976-770f56ea6a6b" (UID: "935dca86-6400-44ad-9976-770f56ea6a6b"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:56:06 crc kubenswrapper[4926]: I1122 10:56:06.259276 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/935dca86-6400-44ad-9976-770f56ea6a6b-config" (OuterVolumeSpecName: "config") pod "935dca86-6400-44ad-9976-770f56ea6a6b" (UID: "935dca86-6400-44ad-9976-770f56ea6a6b"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:56:06 crc kubenswrapper[4926]: I1122 10:56:06.269974 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/935dca86-6400-44ad-9976-770f56ea6a6b-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "935dca86-6400-44ad-9976-770f56ea6a6b" (UID: "935dca86-6400-44ad-9976-770f56ea6a6b"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:56:06 crc kubenswrapper[4926]: I1122 10:56:06.318263 4926 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/935dca86-6400-44ad-9976-770f56ea6a6b-config\") on node \"crc\" DevicePath \"\"" Nov 22 10:56:06 crc kubenswrapper[4926]: I1122 10:56:06.318296 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-h4v7s\" (UniqueName: \"kubernetes.io/projected/935dca86-6400-44ad-9976-770f56ea6a6b-kube-api-access-h4v7s\") on node \"crc\" DevicePath \"\"" Nov 22 10:56:06 crc kubenswrapper[4926]: I1122 10:56:06.318309 4926 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/935dca86-6400-44ad-9976-770f56ea6a6b-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 22 10:56:06 crc kubenswrapper[4926]: I1122 10:56:06.318319 4926 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/935dca86-6400-44ad-9976-770f56ea6a6b-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 22 10:56:07 crc kubenswrapper[4926]: I1122 10:56:07.110367 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-8cc7fc4dc-82m4x" event={"ID":"935dca86-6400-44ad-9976-770f56ea6a6b","Type":"ContainerDied","Data":"e2976ad5a0e5de8b74fff79f499fa72213f8c317975f8e9a214a91b659e68791"} Nov 22 10:56:07 crc kubenswrapper[4926]: I1122 10:56:07.110386 4926 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-8cc7fc4dc-82m4x" Nov 22 10:56:07 crc kubenswrapper[4926]: I1122 10:56:07.110427 4926 scope.go:117] "RemoveContainer" containerID="ba699efc11ac7cfa31fc5edbc853bfd84060e7a89bd0f3897551ceef0b8c516d" Nov 22 10:56:07 crc kubenswrapper[4926]: I1122 10:56:07.116602 4926 generic.go:334] "Generic (PLEG): container finished" podID="06d59088-e96c-45eb-aba8-00382ceaa48a" containerID="3c756e96c803958d3a4129076f83d73a4b74df88a96a3e20d4ae13c7bf2a0830" exitCode=0 Nov 22 10:56:07 crc kubenswrapper[4926]: I1122 10:56:07.118482 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-bn9s6" event={"ID":"06d59088-e96c-45eb-aba8-00382ceaa48a","Type":"ContainerDied","Data":"3c756e96c803958d3a4129076f83d73a4b74df88a96a3e20d4ae13c7bf2a0830"} Nov 22 10:56:07 crc kubenswrapper[4926]: I1122 10:56:07.144021 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-8cc7fc4dc-82m4x"] Nov 22 10:56:07 crc kubenswrapper[4926]: I1122 10:56:07.152796 4926 scope.go:117] "RemoveContainer" containerID="6977ee2f27390631c7627763ddef83b7cda045cc4026277cea14a2be84e4295d" Nov 22 10:56:07 crc kubenswrapper[4926]: I1122 10:56:07.159006 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-8cc7fc4dc-82m4x"] Nov 22 10:56:07 crc kubenswrapper[4926]: I1122 10:56:07.337607 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/2d0b3ca4-531c-4c3e-9af7-f5d2b65c7251-etc-swift\") pod \"swift-storage-0\" (UID: \"2d0b3ca4-531c-4c3e-9af7-f5d2b65c7251\") " pod="openstack/swift-storage-0" Nov 22 10:56:07 crc kubenswrapper[4926]: I1122 10:56:07.363810 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/2d0b3ca4-531c-4c3e-9af7-f5d2b65c7251-etc-swift\") pod \"swift-storage-0\" (UID: \"2d0b3ca4-531c-4c3e-9af7-f5d2b65c7251\") " pod="openstack/swift-storage-0" Nov 22 10:56:07 crc kubenswrapper[4926]: I1122 10:56:07.451026 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-0a97-account-create-update-sx5t4" Nov 22 10:56:07 crc kubenswrapper[4926]: I1122 10:56:07.540520 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/5c28a203-3548-47cc-891f-050d9a3fd7c4-operator-scripts\") pod \"5c28a203-3548-47cc-891f-050d9a3fd7c4\" (UID: \"5c28a203-3548-47cc-891f-050d9a3fd7c4\") " Nov 22 10:56:07 crc kubenswrapper[4926]: I1122 10:56:07.540590 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-78d7j\" (UniqueName: \"kubernetes.io/projected/5c28a203-3548-47cc-891f-050d9a3fd7c4-kube-api-access-78d7j\") pod \"5c28a203-3548-47cc-891f-050d9a3fd7c4\" (UID: \"5c28a203-3548-47cc-891f-050d9a3fd7c4\") " Nov 22 10:56:07 crc kubenswrapper[4926]: I1122 10:56:07.541240 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5c28a203-3548-47cc-891f-050d9a3fd7c4-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "5c28a203-3548-47cc-891f-050d9a3fd7c4" (UID: "5c28a203-3548-47cc-891f-050d9a3fd7c4"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:56:07 crc kubenswrapper[4926]: I1122 10:56:07.544227 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5c28a203-3548-47cc-891f-050d9a3fd7c4-kube-api-access-78d7j" (OuterVolumeSpecName: "kube-api-access-78d7j") pod "5c28a203-3548-47cc-891f-050d9a3fd7c4" (UID: "5c28a203-3548-47cc-891f-050d9a3fd7c4"). InnerVolumeSpecName "kube-api-access-78d7j". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:56:07 crc kubenswrapper[4926]: I1122 10:56:07.544682 4926 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/5c28a203-3548-47cc-891f-050d9a3fd7c4-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 22 10:56:07 crc kubenswrapper[4926]: I1122 10:56:07.544716 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-78d7j\" (UniqueName: \"kubernetes.io/projected/5c28a203-3548-47cc-891f-050d9a3fd7c4-kube-api-access-78d7j\") on node \"crc\" DevicePath \"\"" Nov 22 10:56:07 crc kubenswrapper[4926]: I1122 10:56:07.551046 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-8467-account-create-update-psl9r" Nov 22 10:56:07 crc kubenswrapper[4926]: I1122 10:56:07.558761 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-9385-account-create-update-w2grf" Nov 22 10:56:07 crc kubenswrapper[4926]: I1122 10:56:07.588243 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-storage-0" Nov 22 10:56:07 crc kubenswrapper[4926]: I1122 10:56:07.646084 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cmbcj\" (UniqueName: \"kubernetes.io/projected/0d5250a1-2aea-43b4-899a-d714d1dbf3ef-kube-api-access-cmbcj\") pod \"0d5250a1-2aea-43b4-899a-d714d1dbf3ef\" (UID: \"0d5250a1-2aea-43b4-899a-d714d1dbf3ef\") " Nov 22 10:56:07 crc kubenswrapper[4926]: I1122 10:56:07.646197 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7nbtq\" (UniqueName: \"kubernetes.io/projected/8db0114b-7b33-4e75-88ba-cb05b049fa7d-kube-api-access-7nbtq\") pod \"8db0114b-7b33-4e75-88ba-cb05b049fa7d\" (UID: \"8db0114b-7b33-4e75-88ba-cb05b049fa7d\") " Nov 22 10:56:07 crc kubenswrapper[4926]: I1122 10:56:07.646224 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0d5250a1-2aea-43b4-899a-d714d1dbf3ef-operator-scripts\") pod \"0d5250a1-2aea-43b4-899a-d714d1dbf3ef\" (UID: \"0d5250a1-2aea-43b4-899a-d714d1dbf3ef\") " Nov 22 10:56:07 crc kubenswrapper[4926]: I1122 10:56:07.646266 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8db0114b-7b33-4e75-88ba-cb05b049fa7d-operator-scripts\") pod \"8db0114b-7b33-4e75-88ba-cb05b049fa7d\" (UID: \"8db0114b-7b33-4e75-88ba-cb05b049fa7d\") " Nov 22 10:56:07 crc kubenswrapper[4926]: I1122 10:56:07.646923 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8db0114b-7b33-4e75-88ba-cb05b049fa7d-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "8db0114b-7b33-4e75-88ba-cb05b049fa7d" (UID: "8db0114b-7b33-4e75-88ba-cb05b049fa7d"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:56:07 crc kubenswrapper[4926]: I1122 10:56:07.647684 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0d5250a1-2aea-43b4-899a-d714d1dbf3ef-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "0d5250a1-2aea-43b4-899a-d714d1dbf3ef" (UID: "0d5250a1-2aea-43b4-899a-d714d1dbf3ef"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:56:07 crc kubenswrapper[4926]: I1122 10:56:07.649770 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0d5250a1-2aea-43b4-899a-d714d1dbf3ef-kube-api-access-cmbcj" (OuterVolumeSpecName: "kube-api-access-cmbcj") pod "0d5250a1-2aea-43b4-899a-d714d1dbf3ef" (UID: "0d5250a1-2aea-43b4-899a-d714d1dbf3ef"). InnerVolumeSpecName "kube-api-access-cmbcj". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:56:07 crc kubenswrapper[4926]: I1122 10:56:07.651283 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8db0114b-7b33-4e75-88ba-cb05b049fa7d-kube-api-access-7nbtq" (OuterVolumeSpecName: "kube-api-access-7nbtq") pod "8db0114b-7b33-4e75-88ba-cb05b049fa7d" (UID: "8db0114b-7b33-4e75-88ba-cb05b049fa7d"). InnerVolumeSpecName "kube-api-access-7nbtq". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:56:07 crc kubenswrapper[4926]: I1122 10:56:07.748113 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7nbtq\" (UniqueName: \"kubernetes.io/projected/8db0114b-7b33-4e75-88ba-cb05b049fa7d-kube-api-access-7nbtq\") on node \"crc\" DevicePath \"\"" Nov 22 10:56:07 crc kubenswrapper[4926]: I1122 10:56:07.748146 4926 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0d5250a1-2aea-43b4-899a-d714d1dbf3ef-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 22 10:56:07 crc kubenswrapper[4926]: I1122 10:56:07.748155 4926 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8db0114b-7b33-4e75-88ba-cb05b049fa7d-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 22 10:56:07 crc kubenswrapper[4926]: I1122 10:56:07.748164 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cmbcj\" (UniqueName: \"kubernetes.io/projected/0d5250a1-2aea-43b4-899a-d714d1dbf3ef-kube-api-access-cmbcj\") on node \"crc\" DevicePath \"\"" Nov 22 10:56:08 crc kubenswrapper[4926]: I1122 10:56:08.087161 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-storage-0"] Nov 22 10:56:08 crc kubenswrapper[4926]: W1122 10:56:08.094295 4926 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod2d0b3ca4_531c_4c3e_9af7_f5d2b65c7251.slice/crio-8904a7a47387f7284728ae63cb0b90e395b13e5de933ef5a37bac0ce07ad9f3f WatchSource:0}: Error finding container 8904a7a47387f7284728ae63cb0b90e395b13e5de933ef5a37bac0ce07ad9f3f: Status 404 returned error can't find the container with id 8904a7a47387f7284728ae63cb0b90e395b13e5de933ef5a37bac0ce07ad9f3f Nov 22 10:56:08 crc kubenswrapper[4926]: I1122 10:56:08.129510 4926 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-0a97-account-create-update-sx5t4" Nov 22 10:56:08 crc kubenswrapper[4926]: I1122 10:56:08.129545 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-0a97-account-create-update-sx5t4" event={"ID":"5c28a203-3548-47cc-891f-050d9a3fd7c4","Type":"ContainerDied","Data":"df6dba5811391751194f2b8c77323bb8c9d4538689313e52238a7f7b03458df1"} Nov 22 10:56:08 crc kubenswrapper[4926]: I1122 10:56:08.129591 4926 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="df6dba5811391751194f2b8c77323bb8c9d4538689313e52238a7f7b03458df1" Nov 22 10:56:08 crc kubenswrapper[4926]: I1122 10:56:08.136613 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-9385-account-create-update-w2grf" event={"ID":"8db0114b-7b33-4e75-88ba-cb05b049fa7d","Type":"ContainerDied","Data":"59342e4a020695e80e125c1fd9d8b0a42b87c1f4d5197770ca79818b0f1f4cd6"} Nov 22 10:56:08 crc kubenswrapper[4926]: I1122 10:56:08.137099 4926 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="59342e4a020695e80e125c1fd9d8b0a42b87c1f4d5197770ca79818b0f1f4cd6" Nov 22 10:56:08 crc kubenswrapper[4926]: I1122 10:56:08.136637 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-9385-account-create-update-w2grf" Nov 22 10:56:08 crc kubenswrapper[4926]: I1122 10:56:08.141143 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"2d0b3ca4-531c-4c3e-9af7-f5d2b65c7251","Type":"ContainerStarted","Data":"8904a7a47387f7284728ae63cb0b90e395b13e5de933ef5a37bac0ce07ad9f3f"} Nov 22 10:56:08 crc kubenswrapper[4926]: I1122 10:56:08.144402 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-8467-account-create-update-psl9r" Nov 22 10:56:08 crc kubenswrapper[4926]: I1122 10:56:08.144422 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-8467-account-create-update-psl9r" event={"ID":"0d5250a1-2aea-43b4-899a-d714d1dbf3ef","Type":"ContainerDied","Data":"b2773cf9b24855c28978a99cb2ad007f0909c015e74de87e0bec9a2f0b47062b"} Nov 22 10:56:08 crc kubenswrapper[4926]: I1122 10:56:08.144497 4926 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b2773cf9b24855c28978a99cb2ad007f0909c015e74de87e0bec9a2f0b47062b" Nov 22 10:56:08 crc kubenswrapper[4926]: I1122 10:56:08.499229 4926 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-ring-rebalance-bn9s6" Nov 22 10:56:08 crc kubenswrapper[4926]: I1122 10:56:08.564832 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/06d59088-e96c-45eb-aba8-00382ceaa48a-combined-ca-bundle\") pod \"06d59088-e96c-45eb-aba8-00382ceaa48a\" (UID: \"06d59088-e96c-45eb-aba8-00382ceaa48a\") " Nov 22 10:56:08 crc kubenswrapper[4926]: I1122 10:56:08.564938 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/06d59088-e96c-45eb-aba8-00382ceaa48a-dispersionconf\") pod \"06d59088-e96c-45eb-aba8-00382ceaa48a\" (UID: \"06d59088-e96c-45eb-aba8-00382ceaa48a\") " Nov 22 10:56:08 crc kubenswrapper[4926]: I1122 10:56:08.564973 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/06d59088-e96c-45eb-aba8-00382ceaa48a-ring-data-devices\") pod \"06d59088-e96c-45eb-aba8-00382ceaa48a\" (UID: \"06d59088-e96c-45eb-aba8-00382ceaa48a\") " Nov 22 10:56:08 crc kubenswrapper[4926]: I1122 10:56:08.565240 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/06d59088-e96c-45eb-aba8-00382ceaa48a-etc-swift\") pod \"06d59088-e96c-45eb-aba8-00382ceaa48a\" (UID: \"06d59088-e96c-45eb-aba8-00382ceaa48a\") " Nov 22 10:56:08 crc kubenswrapper[4926]: I1122 10:56:08.565297 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-drncd\" (UniqueName: \"kubernetes.io/projected/06d59088-e96c-45eb-aba8-00382ceaa48a-kube-api-access-drncd\") pod \"06d59088-e96c-45eb-aba8-00382ceaa48a\" (UID: \"06d59088-e96c-45eb-aba8-00382ceaa48a\") " Nov 22 10:56:08 crc kubenswrapper[4926]: I1122 10:56:08.565334 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/06d59088-e96c-45eb-aba8-00382ceaa48a-swiftconf\") pod \"06d59088-e96c-45eb-aba8-00382ceaa48a\" (UID: \"06d59088-e96c-45eb-aba8-00382ceaa48a\") " Nov 22 10:56:08 crc kubenswrapper[4926]: I1122 10:56:08.565477 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/06d59088-e96c-45eb-aba8-00382ceaa48a-scripts\") pod \"06d59088-e96c-45eb-aba8-00382ceaa48a\" (UID: \"06d59088-e96c-45eb-aba8-00382ceaa48a\") " Nov 22 10:56:08 crc kubenswrapper[4926]: I1122 10:56:08.580447 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/06d59088-e96c-45eb-aba8-00382ceaa48a-etc-swift" (OuterVolumeSpecName: "etc-swift") pod "06d59088-e96c-45eb-aba8-00382ceaa48a" (UID: "06d59088-e96c-45eb-aba8-00382ceaa48a"). InnerVolumeSpecName "etc-swift". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 10:56:08 crc kubenswrapper[4926]: I1122 10:56:08.584253 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/06d59088-e96c-45eb-aba8-00382ceaa48a-kube-api-access-drncd" (OuterVolumeSpecName: "kube-api-access-drncd") pod "06d59088-e96c-45eb-aba8-00382ceaa48a" (UID: "06d59088-e96c-45eb-aba8-00382ceaa48a"). InnerVolumeSpecName "kube-api-access-drncd". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:56:08 crc kubenswrapper[4926]: I1122 10:56:08.587983 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/06d59088-e96c-45eb-aba8-00382ceaa48a-scripts" (OuterVolumeSpecName: "scripts") pod "06d59088-e96c-45eb-aba8-00382ceaa48a" (UID: "06d59088-e96c-45eb-aba8-00382ceaa48a"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:56:08 crc kubenswrapper[4926]: I1122 10:56:08.593398 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="935dca86-6400-44ad-9976-770f56ea6a6b" path="/var/lib/kubelet/pods/935dca86-6400-44ad-9976-770f56ea6a6b/volumes" Nov 22 10:56:08 crc kubenswrapper[4926]: I1122 10:56:08.593801 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/06d59088-e96c-45eb-aba8-00382ceaa48a-ring-data-devices" (OuterVolumeSpecName: "ring-data-devices") pod "06d59088-e96c-45eb-aba8-00382ceaa48a" (UID: "06d59088-e96c-45eb-aba8-00382ceaa48a"). InnerVolumeSpecName "ring-data-devices". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:56:08 crc kubenswrapper[4926]: I1122 10:56:08.597371 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/06d59088-e96c-45eb-aba8-00382ceaa48a-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "06d59088-e96c-45eb-aba8-00382ceaa48a" (UID: "06d59088-e96c-45eb-aba8-00382ceaa48a"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:56:08 crc kubenswrapper[4926]: I1122 10:56:08.597970 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/06d59088-e96c-45eb-aba8-00382ceaa48a-dispersionconf" (OuterVolumeSpecName: "dispersionconf") pod "06d59088-e96c-45eb-aba8-00382ceaa48a" (UID: "06d59088-e96c-45eb-aba8-00382ceaa48a"). InnerVolumeSpecName "dispersionconf". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:56:08 crc kubenswrapper[4926]: I1122 10:56:08.598996 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/06d59088-e96c-45eb-aba8-00382ceaa48a-swiftconf" (OuterVolumeSpecName: "swiftconf") pod "06d59088-e96c-45eb-aba8-00382ceaa48a" (UID: "06d59088-e96c-45eb-aba8-00382ceaa48a"). InnerVolumeSpecName "swiftconf". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:56:08 crc kubenswrapper[4926]: I1122 10:56:08.666908 4926 reconciler_common.go:293] "Volume detached for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/06d59088-e96c-45eb-aba8-00382ceaa48a-etc-swift\") on node \"crc\" DevicePath \"\"" Nov 22 10:56:08 crc kubenswrapper[4926]: I1122 10:56:08.666952 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-drncd\" (UniqueName: \"kubernetes.io/projected/06d59088-e96c-45eb-aba8-00382ceaa48a-kube-api-access-drncd\") on node \"crc\" DevicePath \"\"" Nov 22 10:56:08 crc kubenswrapper[4926]: I1122 10:56:08.666966 4926 reconciler_common.go:293] "Volume detached for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/06d59088-e96c-45eb-aba8-00382ceaa48a-swiftconf\") on node \"crc\" DevicePath \"\"" Nov 22 10:56:08 crc kubenswrapper[4926]: I1122 10:56:08.666980 4926 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/06d59088-e96c-45eb-aba8-00382ceaa48a-scripts\") on node \"crc\" DevicePath \"\"" Nov 22 10:56:08 crc kubenswrapper[4926]: I1122 10:56:08.666990 4926 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/06d59088-e96c-45eb-aba8-00382ceaa48a-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 22 10:56:08 crc kubenswrapper[4926]: I1122 10:56:08.667003 4926 reconciler_common.go:293] "Volume detached for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/06d59088-e96c-45eb-aba8-00382ceaa48a-dispersionconf\") on node \"crc\" DevicePath \"\"" Nov 22 10:56:08 crc kubenswrapper[4926]: I1122 10:56:08.667013 4926 reconciler_common.go:293] "Volume detached for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/06d59088-e96c-45eb-aba8-00382ceaa48a-ring-data-devices\") on node \"crc\" DevicePath \"\"" Nov 22 10:56:08 crc kubenswrapper[4926]: I1122 10:56:08.681838 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-db-sync-tgj2k"] Nov 22 10:56:08 crc kubenswrapper[4926]: E1122 10:56:08.682339 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="06d59088-e96c-45eb-aba8-00382ceaa48a" containerName="swift-ring-rebalance" Nov 22 10:56:08 crc kubenswrapper[4926]: I1122 10:56:08.682362 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="06d59088-e96c-45eb-aba8-00382ceaa48a" containerName="swift-ring-rebalance" Nov 22 10:56:08 crc kubenswrapper[4926]: E1122 10:56:08.682377 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a3ff5805-0887-4e05-98ca-d88590cbe337" containerName="mariadb-database-create" Nov 22 10:56:08 crc kubenswrapper[4926]: I1122 10:56:08.682386 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="a3ff5805-0887-4e05-98ca-d88590cbe337" containerName="mariadb-database-create" Nov 22 10:56:08 crc kubenswrapper[4926]: E1122 10:56:08.682409 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cf5bb2c6-b5be-4094-afe5-380401435ebd" containerName="mariadb-database-create" Nov 22 10:56:08 crc kubenswrapper[4926]: I1122 10:56:08.682418 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="cf5bb2c6-b5be-4094-afe5-380401435ebd" containerName="mariadb-database-create" Nov 22 10:56:08 crc kubenswrapper[4926]: E1122 10:56:08.682434 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="935dca86-6400-44ad-9976-770f56ea6a6b" containerName="dnsmasq-dns" Nov 22 10:56:08 crc kubenswrapper[4926]: I1122 
Nov 22 10:56:08 crc kubenswrapper[4926]: E1122 10:56:08.682458 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0d5250a1-2aea-43b4-899a-d714d1dbf3ef" containerName="mariadb-account-create-update"
Nov 22 10:56:08 crc kubenswrapper[4926]: I1122 10:56:08.682466 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="0d5250a1-2aea-43b4-899a-d714d1dbf3ef" containerName="mariadb-account-create-update"
Nov 22 10:56:08 crc kubenswrapper[4926]: E1122 10:56:08.682482 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="935dca86-6400-44ad-9976-770f56ea6a6b" containerName="init"
Nov 22 10:56:08 crc kubenswrapper[4926]: I1122 10:56:08.682492 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="935dca86-6400-44ad-9976-770f56ea6a6b" containerName="init"
Nov 22 10:56:08 crc kubenswrapper[4926]: E1122 10:56:08.682507 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="345b6795-6ff9-4d04-9128-4123b30d27da" containerName="mariadb-database-create"
Nov 22 10:56:08 crc kubenswrapper[4926]: I1122 10:56:08.682517 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="345b6795-6ff9-4d04-9128-4123b30d27da" containerName="mariadb-database-create"
Nov 22 10:56:08 crc kubenswrapper[4926]: E1122 10:56:08.682536 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8db0114b-7b33-4e75-88ba-cb05b049fa7d" containerName="mariadb-account-create-update"
Nov 22 10:56:08 crc kubenswrapper[4926]: I1122 10:56:08.682545 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="8db0114b-7b33-4e75-88ba-cb05b049fa7d" containerName="mariadb-account-create-update"
Nov 22 10:56:08 crc kubenswrapper[4926]: E1122 10:56:08.682562 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5c28a203-3548-47cc-891f-050d9a3fd7c4" containerName="mariadb-account-create-update"
Nov 22 10:56:08 crc kubenswrapper[4926]: I1122 10:56:08.682570 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="5c28a203-3548-47cc-891f-050d9a3fd7c4" containerName="mariadb-account-create-update"
Nov 22 10:56:08 crc kubenswrapper[4926]: I1122 10:56:08.682763 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="a3ff5805-0887-4e05-98ca-d88590cbe337" containerName="mariadb-database-create"
Nov 22 10:56:08 crc kubenswrapper[4926]: I1122 10:56:08.682798 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="0d5250a1-2aea-43b4-899a-d714d1dbf3ef" containerName="mariadb-account-create-update"
Nov 22 10:56:08 crc kubenswrapper[4926]: I1122 10:56:08.682812 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="345b6795-6ff9-4d04-9128-4123b30d27da" containerName="mariadb-database-create"
Nov 22 10:56:08 crc kubenswrapper[4926]: I1122 10:56:08.682838 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="cf5bb2c6-b5be-4094-afe5-380401435ebd" containerName="mariadb-database-create"
Nov 22 10:56:08 crc kubenswrapper[4926]: I1122 10:56:08.682853 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="5c28a203-3548-47cc-891f-050d9a3fd7c4" containerName="mariadb-account-create-update"
Nov 22 10:56:08 crc kubenswrapper[4926]: I1122 10:56:08.682868 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="935dca86-6400-44ad-9976-770f56ea6a6b" containerName="dnsmasq-dns"
Nov 22 10:56:08 crc kubenswrapper[4926]: I1122 10:56:08.682911 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="06d59088-e96c-45eb-aba8-00382ceaa48a" containerName="swift-ring-rebalance"
Nov 22 10:56:08 crc kubenswrapper[4926]: I1122 10:56:08.682926 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="8db0114b-7b33-4e75-88ba-cb05b049fa7d" containerName="mariadb-account-create-update"
Nov 22 10:56:08 crc kubenswrapper[4926]: I1122 10:56:08.683552 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-sync-tgj2k"
Nov 22 10:56:08 crc kubenswrapper[4926]: I1122 10:56:08.687715 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-config-data"
Nov 22 10:56:08 crc kubenswrapper[4926]: I1122 10:56:08.687989 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-glance-dockercfg-c98zk"
Nov 22 10:56:08 crc kubenswrapper[4926]: I1122 10:56:08.699058 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-sync-tgj2k"]
Nov 22 10:56:08 crc kubenswrapper[4926]: I1122 10:56:08.768691 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q89d5\" (UniqueName: \"kubernetes.io/projected/a8055b7b-5391-4ded-a3a7-ca9e86b43ec6-kube-api-access-q89d5\") pod \"glance-db-sync-tgj2k\" (UID: \"a8055b7b-5391-4ded-a3a7-ca9e86b43ec6\") " pod="openstack/glance-db-sync-tgj2k"
Nov 22 10:56:08 crc kubenswrapper[4926]: I1122 10:56:08.768810 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a8055b7b-5391-4ded-a3a7-ca9e86b43ec6-config-data\") pod \"glance-db-sync-tgj2k\" (UID: \"a8055b7b-5391-4ded-a3a7-ca9e86b43ec6\") " pod="openstack/glance-db-sync-tgj2k"
Nov 22 10:56:08 crc kubenswrapper[4926]: I1122 10:56:08.768843 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a8055b7b-5391-4ded-a3a7-ca9e86b43ec6-combined-ca-bundle\") pod \"glance-db-sync-tgj2k\" (UID: \"a8055b7b-5391-4ded-a3a7-ca9e86b43ec6\") " pod="openstack/glance-db-sync-tgj2k"
Nov 22 10:56:08 crc kubenswrapper[4926]: I1122 10:56:08.769027 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/a8055b7b-5391-4ded-a3a7-ca9e86b43ec6-db-sync-config-data\") pod \"glance-db-sync-tgj2k\" (UID: \"a8055b7b-5391-4ded-a3a7-ca9e86b43ec6\") " pod="openstack/glance-db-sync-tgj2k"
Nov 22 10:56:08 crc kubenswrapper[4926]: I1122 10:56:08.870174 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/a8055b7b-5391-4ded-a3a7-ca9e86b43ec6-db-sync-config-data\") pod \"glance-db-sync-tgj2k\" (UID: \"a8055b7b-5391-4ded-a3a7-ca9e86b43ec6\") " pod="openstack/glance-db-sync-tgj2k"
Nov 22 10:56:08 crc kubenswrapper[4926]: I1122 10:56:08.870227 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q89d5\" (UniqueName: \"kubernetes.io/projected/a8055b7b-5391-4ded-a3a7-ca9e86b43ec6-kube-api-access-q89d5\") pod \"glance-db-sync-tgj2k\" (UID: \"a8055b7b-5391-4ded-a3a7-ca9e86b43ec6\") " pod="openstack/glance-db-sync-tgj2k"
Nov 22 10:56:08 crc kubenswrapper[4926]: I1122 10:56:08.870294 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a8055b7b-5391-4ded-a3a7-ca9e86b43ec6-config-data\") pod \"glance-db-sync-tgj2k\" (UID: \"a8055b7b-5391-4ded-a3a7-ca9e86b43ec6\") " pod="openstack/glance-db-sync-tgj2k"
\"kubernetes.io/secret/a8055b7b-5391-4ded-a3a7-ca9e86b43ec6-config-data\") pod \"glance-db-sync-tgj2k\" (UID: \"a8055b7b-5391-4ded-a3a7-ca9e86b43ec6\") " pod="openstack/glance-db-sync-tgj2k" Nov 22 10:56:08 crc kubenswrapper[4926]: I1122 10:56:08.870315 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a8055b7b-5391-4ded-a3a7-ca9e86b43ec6-combined-ca-bundle\") pod \"glance-db-sync-tgj2k\" (UID: \"a8055b7b-5391-4ded-a3a7-ca9e86b43ec6\") " pod="openstack/glance-db-sync-tgj2k" Nov 22 10:56:08 crc kubenswrapper[4926]: I1122 10:56:08.874827 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a8055b7b-5391-4ded-a3a7-ca9e86b43ec6-combined-ca-bundle\") pod \"glance-db-sync-tgj2k\" (UID: \"a8055b7b-5391-4ded-a3a7-ca9e86b43ec6\") " pod="openstack/glance-db-sync-tgj2k" Nov 22 10:56:08 crc kubenswrapper[4926]: I1122 10:56:08.875558 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/a8055b7b-5391-4ded-a3a7-ca9e86b43ec6-db-sync-config-data\") pod \"glance-db-sync-tgj2k\" (UID: \"a8055b7b-5391-4ded-a3a7-ca9e86b43ec6\") " pod="openstack/glance-db-sync-tgj2k" Nov 22 10:56:08 crc kubenswrapper[4926]: I1122 10:56:08.877537 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a8055b7b-5391-4ded-a3a7-ca9e86b43ec6-config-data\") pod \"glance-db-sync-tgj2k\" (UID: \"a8055b7b-5391-4ded-a3a7-ca9e86b43ec6\") " pod="openstack/glance-db-sync-tgj2k" Nov 22 10:56:08 crc kubenswrapper[4926]: I1122 10:56:08.888270 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-q89d5\" (UniqueName: \"kubernetes.io/projected/a8055b7b-5391-4ded-a3a7-ca9e86b43ec6-kube-api-access-q89d5\") pod \"glance-db-sync-tgj2k\" (UID: \"a8055b7b-5391-4ded-a3a7-ca9e86b43ec6\") " pod="openstack/glance-db-sync-tgj2k" Nov 22 10:56:09 crc kubenswrapper[4926]: I1122 10:56:09.001081 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-sync-tgj2k" Nov 22 10:56:09 crc kubenswrapper[4926]: I1122 10:56:09.026807 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-northd-0" Nov 22 10:56:09 crc kubenswrapper[4926]: I1122 10:56:09.160064 4926 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-ring-rebalance-bn9s6" Nov 22 10:56:09 crc kubenswrapper[4926]: I1122 10:56:09.160048 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-bn9s6" event={"ID":"06d59088-e96c-45eb-aba8-00382ceaa48a","Type":"ContainerDied","Data":"8840ebd28b3069fbf03b1c2dd52264933b5c9c7cb3e272d2c65771e4cd8ca11c"} Nov 22 10:56:09 crc kubenswrapper[4926]: I1122 10:56:09.160395 4926 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="8840ebd28b3069fbf03b1c2dd52264933b5c9c7cb3e272d2c65771e4cd8ca11c" Nov 22 10:56:09 crc kubenswrapper[4926]: I1122 10:56:09.556988 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-sync-tgj2k"] Nov 22 10:56:09 crc kubenswrapper[4926]: W1122 10:56:09.574430 4926 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda8055b7b_5391_4ded_a3a7_ca9e86b43ec6.slice/crio-39c5100137684100462ffb361ee59630feab6602f35db5bae7f3da7d4b6fb587 WatchSource:0}: Error finding container 39c5100137684100462ffb361ee59630feab6602f35db5bae7f3da7d4b6fb587: Status 404 returned error can't find the container with id 39c5100137684100462ffb361ee59630feab6602f35db5bae7f3da7d4b6fb587 Nov 22 10:56:09 crc kubenswrapper[4926]: I1122 10:56:09.660825 4926 patch_prober.go:28] interesting pod/machine-config-daemon-xr9nd container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 22 10:56:09 crc kubenswrapper[4926]: I1122 10:56:09.660902 4926 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" podUID="d4977b14-85c3-4141-9b15-1768f09e8d27" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 22 10:56:09 crc kubenswrapper[4926]: I1122 10:56:09.660941 4926 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" Nov 22 10:56:09 crc kubenswrapper[4926]: I1122 10:56:09.661519 4926 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"324c8a547404e72a94f3704898c15718c3b4d8e320319c01811edb40ae550f2e"} pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 22 10:56:09 crc kubenswrapper[4926]: I1122 10:56:09.661568 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" podUID="d4977b14-85c3-4141-9b15-1768f09e8d27" containerName="machine-config-daemon" containerID="cri-o://324c8a547404e72a94f3704898c15718c3b4d8e320319c01811edb40ae550f2e" gracePeriod=600 Nov 22 10:56:10 crc kubenswrapper[4926]: I1122 10:56:10.174473 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-tgj2k" event={"ID":"a8055b7b-5391-4ded-a3a7-ca9e86b43ec6","Type":"ContainerStarted","Data":"39c5100137684100462ffb361ee59630feab6602f35db5bae7f3da7d4b6fb587"} Nov 22 10:56:10 crc kubenswrapper[4926]: I1122 10:56:10.201192 4926 generic.go:334] "Generic (PLEG): container finished" podID="d4977b14-85c3-4141-9b15-1768f09e8d27" 
containerID="324c8a547404e72a94f3704898c15718c3b4d8e320319c01811edb40ae550f2e" exitCode=0 Nov 22 10:56:10 crc kubenswrapper[4926]: I1122 10:56:10.201240 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" event={"ID":"d4977b14-85c3-4141-9b15-1768f09e8d27","Type":"ContainerDied","Data":"324c8a547404e72a94f3704898c15718c3b4d8e320319c01811edb40ae550f2e"} Nov 22 10:56:10 crc kubenswrapper[4926]: I1122 10:56:10.201288 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" event={"ID":"d4977b14-85c3-4141-9b15-1768f09e8d27","Type":"ContainerStarted","Data":"15cec4426d1f6906f001420dd32a2e3b60079ed2bd3dc4ce7916ceddb9716375"} Nov 22 10:56:10 crc kubenswrapper[4926]: I1122 10:56:10.201304 4926 scope.go:117] "RemoveContainer" containerID="0463c6c1ebc5539c197e7353369d97a26101d13494f1fee9c7e8ab944e7952f8" Nov 22 10:56:10 crc kubenswrapper[4926]: I1122 10:56:10.210877 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"2d0b3ca4-531c-4c3e-9af7-f5d2b65c7251","Type":"ContainerStarted","Data":"28f693dc3d57927bc9bdd9b76dff8de02459018d7375d2647fbfdb52597c997e"} Nov 22 10:56:10 crc kubenswrapper[4926]: I1122 10:56:10.210946 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"2d0b3ca4-531c-4c3e-9af7-f5d2b65c7251","Type":"ContainerStarted","Data":"d0a63af8c730420f536b5af5291ea5089b2a0a07be1fbaf41729ebc5eb78228b"} Nov 22 10:56:10 crc kubenswrapper[4926]: I1122 10:56:10.210959 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"2d0b3ca4-531c-4c3e-9af7-f5d2b65c7251","Type":"ContainerStarted","Data":"cfbc147f361bf2cde72995adf76258159ffd1055ba36866fa285b0de10e512c2"} Nov 22 10:56:11 crc kubenswrapper[4926]: I1122 10:56:11.225339 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"2d0b3ca4-531c-4c3e-9af7-f5d2b65c7251","Type":"ContainerStarted","Data":"f1f59339a2ea11671fc06bd94a8d6badc2a59fd2610dcf42211bfc132c99df26"} Nov 22 10:56:12 crc kubenswrapper[4926]: I1122 10:56:12.242880 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"2d0b3ca4-531c-4c3e-9af7-f5d2b65c7251","Type":"ContainerStarted","Data":"8aa1aa9fdf7f1e5353d9eb081d3377a034cd4151dec33f231b737b593818b373"} Nov 22 10:56:12 crc kubenswrapper[4926]: I1122 10:56:12.243458 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"2d0b3ca4-531c-4c3e-9af7-f5d2b65c7251","Type":"ContainerStarted","Data":"9e46f208a2e39dcd81d53a7b578d79a73dd119856f6a1af338150cf1196d1120"} Nov 22 10:56:12 crc kubenswrapper[4926]: I1122 10:56:12.243475 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"2d0b3ca4-531c-4c3e-9af7-f5d2b65c7251","Type":"ContainerStarted","Data":"665718eb006cc24fbb89d5421a14180d1202bb66773e871a9fc58312f0c607e0"} Nov 22 10:56:12 crc kubenswrapper[4926]: I1122 10:56:12.243501 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"2d0b3ca4-531c-4c3e-9af7-f5d2b65c7251","Type":"ContainerStarted","Data":"c72ca81b8e9830316340fbb1dc2586b936bbec82840cd8c72e7bc03d29a88712"} Nov 22 10:56:13 crc kubenswrapper[4926]: I1122 10:56:13.271925 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" 
event={"ID":"2d0b3ca4-531c-4c3e-9af7-f5d2b65c7251","Type":"ContainerStarted","Data":"7685d3ee3ae44c2a2673c7b6c58b268920ef79c188b8edf7b783f72568c3d671"} Nov 22 10:56:13 crc kubenswrapper[4926]: I1122 10:56:13.272534 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"2d0b3ca4-531c-4c3e-9af7-f5d2b65c7251","Type":"ContainerStarted","Data":"04f07d72b6d3a6a407efeb0c21e9dbb88e0ae71470508c31ad67d979cd6d06cc"} Nov 22 10:56:14 crc kubenswrapper[4926]: I1122 10:56:14.294392 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"2d0b3ca4-531c-4c3e-9af7-f5d2b65c7251","Type":"ContainerStarted","Data":"ea273b6e44a3c5fb06c1fb9696e73bd472b023805aabfeb9f7a018ae827efe81"} Nov 22 10:56:14 crc kubenswrapper[4926]: I1122 10:56:14.294639 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"2d0b3ca4-531c-4c3e-9af7-f5d2b65c7251","Type":"ContainerStarted","Data":"d1702e40f11a80adc3825c37c559843232b126e864ebf3ea456ed5195cc749c8"} Nov 22 10:56:14 crc kubenswrapper[4926]: I1122 10:56:14.294650 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"2d0b3ca4-531c-4c3e-9af7-f5d2b65c7251","Type":"ContainerStarted","Data":"86a717d1b9b3484ae5de77ef7ee81ba31b18875b041fbeec7c6bd4531bb659ee"} Nov 22 10:56:14 crc kubenswrapper[4926]: I1122 10:56:14.294659 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"2d0b3ca4-531c-4c3e-9af7-f5d2b65c7251","Type":"ContainerStarted","Data":"57f61107d2c566b492224c2242b94e410f56b53dcd385d3e85f30934b294f8dc"} Nov 22 10:56:14 crc kubenswrapper[4926]: I1122 10:56:14.294668 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"2d0b3ca4-531c-4c3e-9af7-f5d2b65c7251","Type":"ContainerStarted","Data":"ebe34caab033da27c6c2c65bca404a18dabc00271a77e5d147a7a60b8392a2f2"} Nov 22 10:56:14 crc kubenswrapper[4926]: I1122 10:56:14.362909 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/swift-storage-0" podStartSLOduration=19.698118285 podStartE2EDuration="24.362868223s" podCreationTimestamp="2025-11-22 10:55:50 +0000 UTC" firstStartedPulling="2025-11-22 10:56:08.096738271 +0000 UTC m=+988.398343558" lastFinishedPulling="2025-11-22 10:56:12.761488209 +0000 UTC m=+993.063093496" observedRunningTime="2025-11-22 10:56:14.354536204 +0000 UTC m=+994.656141501" watchObservedRunningTime="2025-11-22 10:56:14.362868223 +0000 UTC m=+994.664473520" Nov 22 10:56:14 crc kubenswrapper[4926]: I1122 10:56:14.685040 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-6d5b6d6b67-dm4tj"] Nov 22 10:56:14 crc kubenswrapper[4926]: I1122 10:56:14.686964 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6d5b6d6b67-dm4tj" Nov 22 10:56:14 crc kubenswrapper[4926]: I1122 10:56:14.693317 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"dns-swift-storage-0" Nov 22 10:56:14 crc kubenswrapper[4926]: I1122 10:56:14.704836 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6d5b6d6b67-dm4tj"] Nov 22 10:56:14 crc kubenswrapper[4926]: I1122 10:56:14.782864 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/659e8c42-1cfd-49fa-a47c-c9c9f15f8a13-dns-swift-storage-0\") pod \"dnsmasq-dns-6d5b6d6b67-dm4tj\" (UID: \"659e8c42-1cfd-49fa-a47c-c9c9f15f8a13\") " pod="openstack/dnsmasq-dns-6d5b6d6b67-dm4tj" Nov 22 10:56:14 crc kubenswrapper[4926]: I1122 10:56:14.782956 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/659e8c42-1cfd-49fa-a47c-c9c9f15f8a13-ovsdbserver-sb\") pod \"dnsmasq-dns-6d5b6d6b67-dm4tj\" (UID: \"659e8c42-1cfd-49fa-a47c-c9c9f15f8a13\") " pod="openstack/dnsmasq-dns-6d5b6d6b67-dm4tj" Nov 22 10:56:14 crc kubenswrapper[4926]: I1122 10:56:14.782983 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xx9vf\" (UniqueName: \"kubernetes.io/projected/659e8c42-1cfd-49fa-a47c-c9c9f15f8a13-kube-api-access-xx9vf\") pod \"dnsmasq-dns-6d5b6d6b67-dm4tj\" (UID: \"659e8c42-1cfd-49fa-a47c-c9c9f15f8a13\") " pod="openstack/dnsmasq-dns-6d5b6d6b67-dm4tj" Nov 22 10:56:14 crc kubenswrapper[4926]: I1122 10:56:14.783321 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/659e8c42-1cfd-49fa-a47c-c9c9f15f8a13-ovsdbserver-nb\") pod \"dnsmasq-dns-6d5b6d6b67-dm4tj\" (UID: \"659e8c42-1cfd-49fa-a47c-c9c9f15f8a13\") " pod="openstack/dnsmasq-dns-6d5b6d6b67-dm4tj" Nov 22 10:56:14 crc kubenswrapper[4926]: I1122 10:56:14.783401 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/659e8c42-1cfd-49fa-a47c-c9c9f15f8a13-dns-svc\") pod \"dnsmasq-dns-6d5b6d6b67-dm4tj\" (UID: \"659e8c42-1cfd-49fa-a47c-c9c9f15f8a13\") " pod="openstack/dnsmasq-dns-6d5b6d6b67-dm4tj" Nov 22 10:56:14 crc kubenswrapper[4926]: I1122 10:56:14.783466 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/659e8c42-1cfd-49fa-a47c-c9c9f15f8a13-config\") pod \"dnsmasq-dns-6d5b6d6b67-dm4tj\" (UID: \"659e8c42-1cfd-49fa-a47c-c9c9f15f8a13\") " pod="openstack/dnsmasq-dns-6d5b6d6b67-dm4tj" Nov 22 10:56:14 crc kubenswrapper[4926]: I1122 10:56:14.885499 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/659e8c42-1cfd-49fa-a47c-c9c9f15f8a13-dns-swift-storage-0\") pod \"dnsmasq-dns-6d5b6d6b67-dm4tj\" (UID: \"659e8c42-1cfd-49fa-a47c-c9c9f15f8a13\") " pod="openstack/dnsmasq-dns-6d5b6d6b67-dm4tj" Nov 22 10:56:14 crc kubenswrapper[4926]: I1122 10:56:14.885635 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/659e8c42-1cfd-49fa-a47c-c9c9f15f8a13-ovsdbserver-sb\") pod \"dnsmasq-dns-6d5b6d6b67-dm4tj\" (UID: 
\"659e8c42-1cfd-49fa-a47c-c9c9f15f8a13\") " pod="openstack/dnsmasq-dns-6d5b6d6b67-dm4tj" Nov 22 10:56:14 crc kubenswrapper[4926]: I1122 10:56:14.885666 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xx9vf\" (UniqueName: \"kubernetes.io/projected/659e8c42-1cfd-49fa-a47c-c9c9f15f8a13-kube-api-access-xx9vf\") pod \"dnsmasq-dns-6d5b6d6b67-dm4tj\" (UID: \"659e8c42-1cfd-49fa-a47c-c9c9f15f8a13\") " pod="openstack/dnsmasq-dns-6d5b6d6b67-dm4tj" Nov 22 10:56:14 crc kubenswrapper[4926]: I1122 10:56:14.885725 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/659e8c42-1cfd-49fa-a47c-c9c9f15f8a13-ovsdbserver-nb\") pod \"dnsmasq-dns-6d5b6d6b67-dm4tj\" (UID: \"659e8c42-1cfd-49fa-a47c-c9c9f15f8a13\") " pod="openstack/dnsmasq-dns-6d5b6d6b67-dm4tj" Nov 22 10:56:14 crc kubenswrapper[4926]: I1122 10:56:14.885769 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/659e8c42-1cfd-49fa-a47c-c9c9f15f8a13-dns-svc\") pod \"dnsmasq-dns-6d5b6d6b67-dm4tj\" (UID: \"659e8c42-1cfd-49fa-a47c-c9c9f15f8a13\") " pod="openstack/dnsmasq-dns-6d5b6d6b67-dm4tj" Nov 22 10:56:14 crc kubenswrapper[4926]: I1122 10:56:14.885801 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/659e8c42-1cfd-49fa-a47c-c9c9f15f8a13-config\") pod \"dnsmasq-dns-6d5b6d6b67-dm4tj\" (UID: \"659e8c42-1cfd-49fa-a47c-c9c9f15f8a13\") " pod="openstack/dnsmasq-dns-6d5b6d6b67-dm4tj" Nov 22 10:56:14 crc kubenswrapper[4926]: I1122 10:56:14.887132 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/659e8c42-1cfd-49fa-a47c-c9c9f15f8a13-config\") pod \"dnsmasq-dns-6d5b6d6b67-dm4tj\" (UID: \"659e8c42-1cfd-49fa-a47c-c9c9f15f8a13\") " pod="openstack/dnsmasq-dns-6d5b6d6b67-dm4tj" Nov 22 10:56:14 crc kubenswrapper[4926]: I1122 10:56:14.887281 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/659e8c42-1cfd-49fa-a47c-c9c9f15f8a13-dns-svc\") pod \"dnsmasq-dns-6d5b6d6b67-dm4tj\" (UID: \"659e8c42-1cfd-49fa-a47c-c9c9f15f8a13\") " pod="openstack/dnsmasq-dns-6d5b6d6b67-dm4tj" Nov 22 10:56:14 crc kubenswrapper[4926]: I1122 10:56:14.887576 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/659e8c42-1cfd-49fa-a47c-c9c9f15f8a13-ovsdbserver-nb\") pod \"dnsmasq-dns-6d5b6d6b67-dm4tj\" (UID: \"659e8c42-1cfd-49fa-a47c-c9c9f15f8a13\") " pod="openstack/dnsmasq-dns-6d5b6d6b67-dm4tj" Nov 22 10:56:14 crc kubenswrapper[4926]: I1122 10:56:14.887721 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/659e8c42-1cfd-49fa-a47c-c9c9f15f8a13-ovsdbserver-sb\") pod \"dnsmasq-dns-6d5b6d6b67-dm4tj\" (UID: \"659e8c42-1cfd-49fa-a47c-c9c9f15f8a13\") " pod="openstack/dnsmasq-dns-6d5b6d6b67-dm4tj" Nov 22 10:56:14 crc kubenswrapper[4926]: I1122 10:56:14.888097 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/659e8c42-1cfd-49fa-a47c-c9c9f15f8a13-dns-swift-storage-0\") pod \"dnsmasq-dns-6d5b6d6b67-dm4tj\" (UID: \"659e8c42-1cfd-49fa-a47c-c9c9f15f8a13\") " pod="openstack/dnsmasq-dns-6d5b6d6b67-dm4tj" Nov 22 10:56:14 crc kubenswrapper[4926]: 
I1122 10:56:14.927191 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xx9vf\" (UniqueName: \"kubernetes.io/projected/659e8c42-1cfd-49fa-a47c-c9c9f15f8a13-kube-api-access-xx9vf\") pod \"dnsmasq-dns-6d5b6d6b67-dm4tj\" (UID: \"659e8c42-1cfd-49fa-a47c-c9c9f15f8a13\") " pod="openstack/dnsmasq-dns-6d5b6d6b67-dm4tj" Nov 22 10:56:15 crc kubenswrapper[4926]: I1122 10:56:15.029541 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6d5b6d6b67-dm4tj" Nov 22 10:56:18 crc kubenswrapper[4926]: I1122 10:56:18.733347 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ovn-controller-pwfdl" podUID="631757e2-e40e-4cc6-a2a3-601c749669b2" containerName="ovn-controller" probeResult="failure" output=< Nov 22 10:56:18 crc kubenswrapper[4926]: ERROR - ovn-controller connection status is 'not connected', expecting 'connected' status Nov 22 10:56:18 crc kubenswrapper[4926]: > Nov 22 10:56:18 crc kubenswrapper[4926]: I1122 10:56:18.758270 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-controller-ovs-hrnx7" Nov 22 10:56:20 crc kubenswrapper[4926]: I1122 10:56:20.353750 4926 generic.go:334] "Generic (PLEG): container finished" podID="12c9d69e-19d9-4c70-a080-c3d3fe0c33ce" containerID="2b35ef8decf1054a6f8673335a0252ebd211d1a9e3f03b61a81ed88a5631be85" exitCode=0 Nov 22 10:56:20 crc kubenswrapper[4926]: I1122 10:56:20.353965 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"12c9d69e-19d9-4c70-a080-c3d3fe0c33ce","Type":"ContainerDied","Data":"2b35ef8decf1054a6f8673335a0252ebd211d1a9e3f03b61a81ed88a5631be85"} Nov 22 10:56:20 crc kubenswrapper[4926]: I1122 10:56:20.357545 4926 generic.go:334] "Generic (PLEG): container finished" podID="cb1c4cdf-86c1-4770-b406-87cb1ea92552" containerID="6afc1ba4f910d558dd79a31516f29391702084f519cdd3bc3789a3258e780b82" exitCode=0 Nov 22 10:56:20 crc kubenswrapper[4926]: I1122 10:56:20.357597 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"cb1c4cdf-86c1-4770-b406-87cb1ea92552","Type":"ContainerDied","Data":"6afc1ba4f910d558dd79a31516f29391702084f519cdd3bc3789a3258e780b82"} Nov 22 10:56:21 crc kubenswrapper[4926]: I1122 10:56:21.245161 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6d5b6d6b67-dm4tj"] Nov 22 10:56:21 crc kubenswrapper[4926]: W1122 10:56:21.258830 4926 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod659e8c42_1cfd_49fa_a47c_c9c9f15f8a13.slice/crio-a3d1d175300df2ee51edbc898fa9c92d460feaf9be3ad96c23d17e6eee41a8e7 WatchSource:0}: Error finding container a3d1d175300df2ee51edbc898fa9c92d460feaf9be3ad96c23d17e6eee41a8e7: Status 404 returned error can't find the container with id a3d1d175300df2ee51edbc898fa9c92d460feaf9be3ad96c23d17e6eee41a8e7 Nov 22 10:56:21 crc kubenswrapper[4926]: I1122 10:56:21.370298 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"12c9d69e-19d9-4c70-a080-c3d3fe0c33ce","Type":"ContainerStarted","Data":"5ed55c3bf3a170998c779817fd8b5d174706e30619b777cab3e7447475c38a06"} Nov 22 10:56:21 crc kubenswrapper[4926]: I1122 10:56:21.370605 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-cell1-server-0" Nov 22 10:56:21 crc kubenswrapper[4926]: I1122 10:56:21.373035 4926 kubelet.go:2453] 
"SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6d5b6d6b67-dm4tj" event={"ID":"659e8c42-1cfd-49fa-a47c-c9c9f15f8a13","Type":"ContainerStarted","Data":"a3d1d175300df2ee51edbc898fa9c92d460feaf9be3ad96c23d17e6eee41a8e7"} Nov 22 10:56:21 crc kubenswrapper[4926]: I1122 10:56:21.377204 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"cb1c4cdf-86c1-4770-b406-87cb1ea92552","Type":"ContainerStarted","Data":"84686b899f9045caf92860c610c762fee79af52ccc158673bbe452783aa18796"} Nov 22 10:56:21 crc kubenswrapper[4926]: I1122 10:56:21.377853 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-server-0" Nov 22 10:56:21 crc kubenswrapper[4926]: I1122 10:56:21.393593 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-cell1-server-0" podStartSLOduration=52.946817959 podStartE2EDuration="58.393567064s" podCreationTimestamp="2025-11-22 10:55:23 +0000 UTC" firstStartedPulling="2025-11-22 10:55:39.483152347 +0000 UTC m=+959.784757634" lastFinishedPulling="2025-11-22 10:55:44.929901452 +0000 UTC m=+965.231506739" observedRunningTime="2025-11-22 10:56:21.390578428 +0000 UTC m=+1001.692183725" watchObservedRunningTime="2025-11-22 10:56:21.393567064 +0000 UTC m=+1001.695172361" Nov 22 10:56:22 crc kubenswrapper[4926]: I1122 10:56:22.386032 4926 generic.go:334] "Generic (PLEG): container finished" podID="659e8c42-1cfd-49fa-a47c-c9c9f15f8a13" containerID="79bc475f2e0f231025dd4ae0b1d044857c501d86cf24cd40d645b4071a2e0558" exitCode=0 Nov 22 10:56:22 crc kubenswrapper[4926]: I1122 10:56:22.386238 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6d5b6d6b67-dm4tj" event={"ID":"659e8c42-1cfd-49fa-a47c-c9c9f15f8a13","Type":"ContainerDied","Data":"79bc475f2e0f231025dd4ae0b1d044857c501d86cf24cd40d645b4071a2e0558"} Nov 22 10:56:22 crc kubenswrapper[4926]: I1122 10:56:22.390603 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-tgj2k" event={"ID":"a8055b7b-5391-4ded-a3a7-ca9e86b43ec6","Type":"ContainerStarted","Data":"04a51ac8e1698115caf042a93f113f50deb41ab5145e1e6d37b799736ef2cb52"} Nov 22 10:56:22 crc kubenswrapper[4926]: I1122 10:56:22.409145 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-server-0" podStartSLOduration=53.483576857 podStartE2EDuration="59.409114071s" podCreationTimestamp="2025-11-22 10:55:23 +0000 UTC" firstStartedPulling="2025-11-22 10:55:39.482715774 +0000 UTC m=+959.784321061" lastFinishedPulling="2025-11-22 10:55:45.408252988 +0000 UTC m=+965.709858275" observedRunningTime="2025-11-22 10:56:21.417335797 +0000 UTC m=+1001.718941084" watchObservedRunningTime="2025-11-22 10:56:22.409114071 +0000 UTC m=+1002.710719358" Nov 22 10:56:22 crc kubenswrapper[4926]: I1122 10:56:22.434002 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-db-sync-tgj2k" podStartSLOduration=3.11338876 podStartE2EDuration="14.433978116s" podCreationTimestamp="2025-11-22 10:56:08 +0000 UTC" firstStartedPulling="2025-11-22 10:56:09.577296032 +0000 UTC m=+989.878901319" lastFinishedPulling="2025-11-22 10:56:20.897885388 +0000 UTC m=+1001.199490675" observedRunningTime="2025-11-22 10:56:22.422257379 +0000 UTC m=+1002.723862666" watchObservedRunningTime="2025-11-22 10:56:22.433978116 +0000 UTC m=+1002.735583393" Nov 22 10:56:23 crc kubenswrapper[4926]: I1122 10:56:23.730685 4926 prober.go:107] "Probe failed" probeType="Readiness" 
pod="openstack/ovn-controller-pwfdl" podUID="631757e2-e40e-4cc6-a2a3-601c749669b2" containerName="ovn-controller" probeResult="failure" output=< Nov 22 10:56:23 crc kubenswrapper[4926]: ERROR - ovn-controller connection status is 'not connected', expecting 'connected' status Nov 22 10:56:23 crc kubenswrapper[4926]: > Nov 22 10:56:23 crc kubenswrapper[4926]: I1122 10:56:23.748313 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-controller-ovs-hrnx7" Nov 22 10:56:23 crc kubenswrapper[4926]: I1122 10:56:23.952474 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-pwfdl-config-k2fqr"] Nov 22 10:56:23 crc kubenswrapper[4926]: I1122 10:56:23.953826 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-pwfdl-config-k2fqr" Nov 22 10:56:23 crc kubenswrapper[4926]: I1122 10:56:23.960833 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-pwfdl-config-k2fqr"] Nov 22 10:56:23 crc kubenswrapper[4926]: I1122 10:56:23.961904 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-extra-scripts" Nov 22 10:56:24 crc kubenswrapper[4926]: I1122 10:56:24.074858 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-djfhs\" (UniqueName: \"kubernetes.io/projected/8dad7d03-1fee-434f-b9b0-39fde646035a-kube-api-access-djfhs\") pod \"ovn-controller-pwfdl-config-k2fqr\" (UID: \"8dad7d03-1fee-434f-b9b0-39fde646035a\") " pod="openstack/ovn-controller-pwfdl-config-k2fqr" Nov 22 10:56:24 crc kubenswrapper[4926]: I1122 10:56:24.074954 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/8dad7d03-1fee-434f-b9b0-39fde646035a-var-run-ovn\") pod \"ovn-controller-pwfdl-config-k2fqr\" (UID: \"8dad7d03-1fee-434f-b9b0-39fde646035a\") " pod="openstack/ovn-controller-pwfdl-config-k2fqr" Nov 22 10:56:24 crc kubenswrapper[4926]: I1122 10:56:24.074986 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/8dad7d03-1fee-434f-b9b0-39fde646035a-additional-scripts\") pod \"ovn-controller-pwfdl-config-k2fqr\" (UID: \"8dad7d03-1fee-434f-b9b0-39fde646035a\") " pod="openstack/ovn-controller-pwfdl-config-k2fqr" Nov 22 10:56:24 crc kubenswrapper[4926]: I1122 10:56:24.075063 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/8dad7d03-1fee-434f-b9b0-39fde646035a-scripts\") pod \"ovn-controller-pwfdl-config-k2fqr\" (UID: \"8dad7d03-1fee-434f-b9b0-39fde646035a\") " pod="openstack/ovn-controller-pwfdl-config-k2fqr" Nov 22 10:56:24 crc kubenswrapper[4926]: I1122 10:56:24.075206 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/8dad7d03-1fee-434f-b9b0-39fde646035a-var-run\") pod \"ovn-controller-pwfdl-config-k2fqr\" (UID: \"8dad7d03-1fee-434f-b9b0-39fde646035a\") " pod="openstack/ovn-controller-pwfdl-config-k2fqr" Nov 22 10:56:24 crc kubenswrapper[4926]: I1122 10:56:24.075270 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/8dad7d03-1fee-434f-b9b0-39fde646035a-var-log-ovn\") pod 
\"ovn-controller-pwfdl-config-k2fqr\" (UID: \"8dad7d03-1fee-434f-b9b0-39fde646035a\") " pod="openstack/ovn-controller-pwfdl-config-k2fqr" Nov 22 10:56:24 crc kubenswrapper[4926]: I1122 10:56:24.177128 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/8dad7d03-1fee-434f-b9b0-39fde646035a-scripts\") pod \"ovn-controller-pwfdl-config-k2fqr\" (UID: \"8dad7d03-1fee-434f-b9b0-39fde646035a\") " pod="openstack/ovn-controller-pwfdl-config-k2fqr" Nov 22 10:56:24 crc kubenswrapper[4926]: I1122 10:56:24.177263 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/8dad7d03-1fee-434f-b9b0-39fde646035a-var-run\") pod \"ovn-controller-pwfdl-config-k2fqr\" (UID: \"8dad7d03-1fee-434f-b9b0-39fde646035a\") " pod="openstack/ovn-controller-pwfdl-config-k2fqr" Nov 22 10:56:24 crc kubenswrapper[4926]: I1122 10:56:24.177581 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/8dad7d03-1fee-434f-b9b0-39fde646035a-var-run\") pod \"ovn-controller-pwfdl-config-k2fqr\" (UID: \"8dad7d03-1fee-434f-b9b0-39fde646035a\") " pod="openstack/ovn-controller-pwfdl-config-k2fqr" Nov 22 10:56:24 crc kubenswrapper[4926]: I1122 10:56:24.177607 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/8dad7d03-1fee-434f-b9b0-39fde646035a-var-log-ovn\") pod \"ovn-controller-pwfdl-config-k2fqr\" (UID: \"8dad7d03-1fee-434f-b9b0-39fde646035a\") " pod="openstack/ovn-controller-pwfdl-config-k2fqr" Nov 22 10:56:24 crc kubenswrapper[4926]: I1122 10:56:24.177293 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/8dad7d03-1fee-434f-b9b0-39fde646035a-var-log-ovn\") pod \"ovn-controller-pwfdl-config-k2fqr\" (UID: \"8dad7d03-1fee-434f-b9b0-39fde646035a\") " pod="openstack/ovn-controller-pwfdl-config-k2fqr" Nov 22 10:56:24 crc kubenswrapper[4926]: I1122 10:56:24.177765 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-djfhs\" (UniqueName: \"kubernetes.io/projected/8dad7d03-1fee-434f-b9b0-39fde646035a-kube-api-access-djfhs\") pod \"ovn-controller-pwfdl-config-k2fqr\" (UID: \"8dad7d03-1fee-434f-b9b0-39fde646035a\") " pod="openstack/ovn-controller-pwfdl-config-k2fqr" Nov 22 10:56:24 crc kubenswrapper[4926]: I1122 10:56:24.177828 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/8dad7d03-1fee-434f-b9b0-39fde646035a-var-run-ovn\") pod \"ovn-controller-pwfdl-config-k2fqr\" (UID: \"8dad7d03-1fee-434f-b9b0-39fde646035a\") " pod="openstack/ovn-controller-pwfdl-config-k2fqr" Nov 22 10:56:24 crc kubenswrapper[4926]: I1122 10:56:24.177872 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/8dad7d03-1fee-434f-b9b0-39fde646035a-additional-scripts\") pod \"ovn-controller-pwfdl-config-k2fqr\" (UID: \"8dad7d03-1fee-434f-b9b0-39fde646035a\") " pod="openstack/ovn-controller-pwfdl-config-k2fqr" Nov 22 10:56:24 crc kubenswrapper[4926]: I1122 10:56:24.178284 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/8dad7d03-1fee-434f-b9b0-39fde646035a-var-run-ovn\") pod 
\"ovn-controller-pwfdl-config-k2fqr\" (UID: \"8dad7d03-1fee-434f-b9b0-39fde646035a\") " pod="openstack/ovn-controller-pwfdl-config-k2fqr" Nov 22 10:56:24 crc kubenswrapper[4926]: I1122 10:56:24.178784 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/8dad7d03-1fee-434f-b9b0-39fde646035a-additional-scripts\") pod \"ovn-controller-pwfdl-config-k2fqr\" (UID: \"8dad7d03-1fee-434f-b9b0-39fde646035a\") " pod="openstack/ovn-controller-pwfdl-config-k2fqr" Nov 22 10:56:24 crc kubenswrapper[4926]: I1122 10:56:24.179213 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/8dad7d03-1fee-434f-b9b0-39fde646035a-scripts\") pod \"ovn-controller-pwfdl-config-k2fqr\" (UID: \"8dad7d03-1fee-434f-b9b0-39fde646035a\") " pod="openstack/ovn-controller-pwfdl-config-k2fqr" Nov 22 10:56:24 crc kubenswrapper[4926]: I1122 10:56:24.203685 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-djfhs\" (UniqueName: \"kubernetes.io/projected/8dad7d03-1fee-434f-b9b0-39fde646035a-kube-api-access-djfhs\") pod \"ovn-controller-pwfdl-config-k2fqr\" (UID: \"8dad7d03-1fee-434f-b9b0-39fde646035a\") " pod="openstack/ovn-controller-pwfdl-config-k2fqr" Nov 22 10:56:24 crc kubenswrapper[4926]: I1122 10:56:24.269690 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-pwfdl-config-k2fqr" Nov 22 10:56:24 crc kubenswrapper[4926]: I1122 10:56:24.410844 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6d5b6d6b67-dm4tj" event={"ID":"659e8c42-1cfd-49fa-a47c-c9c9f15f8a13","Type":"ContainerStarted","Data":"e2bddfc966f356d619dfc8e4082a9545f5020cec227d7cb67bd890ad9679ce8f"} Nov 22 10:56:24 crc kubenswrapper[4926]: I1122 10:56:24.411327 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-6d5b6d6b67-dm4tj" Nov 22 10:56:24 crc kubenswrapper[4926]: I1122 10:56:24.436630 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-6d5b6d6b67-dm4tj" podStartSLOduration=10.43661025 podStartE2EDuration="10.43661025s" podCreationTimestamp="2025-11-22 10:56:14 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 10:56:24.433734807 +0000 UTC m=+1004.735340094" watchObservedRunningTime="2025-11-22 10:56:24.43661025 +0000 UTC m=+1004.738215537" Nov 22 10:56:24 crc kubenswrapper[4926]: I1122 10:56:24.737696 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-pwfdl-config-k2fqr"] Nov 22 10:56:24 crc kubenswrapper[4926]: W1122 10:56:24.747480 4926 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod8dad7d03_1fee_434f_b9b0_39fde646035a.slice/crio-b178a9fe763a2d67469268384c531e4ebfe96a266b4f4ea7726bed71580166c2 WatchSource:0}: Error finding container b178a9fe763a2d67469268384c531e4ebfe96a266b4f4ea7726bed71580166c2: Status 404 returned error can't find the container with id b178a9fe763a2d67469268384c531e4ebfe96a266b4f4ea7726bed71580166c2 Nov 22 10:56:25 crc kubenswrapper[4926]: I1122 10:56:25.421155 4926 generic.go:334] "Generic (PLEG): container finished" podID="8dad7d03-1fee-434f-b9b0-39fde646035a" containerID="edd9a0bdf0930891bdbccf0494ad4f3e4d34a7bb60cc4ecd321c0098b12cf0d3" exitCode=0 Nov 22 10:56:25 crc 
kubenswrapper[4926]: I1122 10:56:25.421220 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-pwfdl-config-k2fqr" event={"ID":"8dad7d03-1fee-434f-b9b0-39fde646035a","Type":"ContainerDied","Data":"edd9a0bdf0930891bdbccf0494ad4f3e4d34a7bb60cc4ecd321c0098b12cf0d3"} Nov 22 10:56:25 crc kubenswrapper[4926]: I1122 10:56:25.421486 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-pwfdl-config-k2fqr" event={"ID":"8dad7d03-1fee-434f-b9b0-39fde646035a","Type":"ContainerStarted","Data":"b178a9fe763a2d67469268384c531e4ebfe96a266b4f4ea7726bed71580166c2"} Nov 22 10:56:26 crc kubenswrapper[4926]: I1122 10:56:26.778917 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-pwfdl-config-k2fqr" Nov 22 10:56:26 crc kubenswrapper[4926]: I1122 10:56:26.923112 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/8dad7d03-1fee-434f-b9b0-39fde646035a-scripts\") pod \"8dad7d03-1fee-434f-b9b0-39fde646035a\" (UID: \"8dad7d03-1fee-434f-b9b0-39fde646035a\") " Nov 22 10:56:26 crc kubenswrapper[4926]: I1122 10:56:26.923289 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-djfhs\" (UniqueName: \"kubernetes.io/projected/8dad7d03-1fee-434f-b9b0-39fde646035a-kube-api-access-djfhs\") pod \"8dad7d03-1fee-434f-b9b0-39fde646035a\" (UID: \"8dad7d03-1fee-434f-b9b0-39fde646035a\") " Nov 22 10:56:26 crc kubenswrapper[4926]: I1122 10:56:26.923318 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/8dad7d03-1fee-434f-b9b0-39fde646035a-var-run\") pod \"8dad7d03-1fee-434f-b9b0-39fde646035a\" (UID: \"8dad7d03-1fee-434f-b9b0-39fde646035a\") " Nov 22 10:56:26 crc kubenswrapper[4926]: I1122 10:56:26.923335 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/8dad7d03-1fee-434f-b9b0-39fde646035a-var-log-ovn\") pod \"8dad7d03-1fee-434f-b9b0-39fde646035a\" (UID: \"8dad7d03-1fee-434f-b9b0-39fde646035a\") " Nov 22 10:56:26 crc kubenswrapper[4926]: I1122 10:56:26.923396 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/8dad7d03-1fee-434f-b9b0-39fde646035a-additional-scripts\") pod \"8dad7d03-1fee-434f-b9b0-39fde646035a\" (UID: \"8dad7d03-1fee-434f-b9b0-39fde646035a\") " Nov 22 10:56:26 crc kubenswrapper[4926]: I1122 10:56:26.923442 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/8dad7d03-1fee-434f-b9b0-39fde646035a-var-run-ovn\") pod \"8dad7d03-1fee-434f-b9b0-39fde646035a\" (UID: \"8dad7d03-1fee-434f-b9b0-39fde646035a\") " Nov 22 10:56:26 crc kubenswrapper[4926]: I1122 10:56:26.923482 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/8dad7d03-1fee-434f-b9b0-39fde646035a-var-run" (OuterVolumeSpecName: "var-run") pod "8dad7d03-1fee-434f-b9b0-39fde646035a" (UID: "8dad7d03-1fee-434f-b9b0-39fde646035a"). InnerVolumeSpecName "var-run". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 22 10:56:26 crc kubenswrapper[4926]: I1122 10:56:26.923549 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/8dad7d03-1fee-434f-b9b0-39fde646035a-var-log-ovn" (OuterVolumeSpecName: "var-log-ovn") pod "8dad7d03-1fee-434f-b9b0-39fde646035a" (UID: "8dad7d03-1fee-434f-b9b0-39fde646035a"). InnerVolumeSpecName "var-log-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 22 10:56:26 crc kubenswrapper[4926]: I1122 10:56:26.923650 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/8dad7d03-1fee-434f-b9b0-39fde646035a-var-run-ovn" (OuterVolumeSpecName: "var-run-ovn") pod "8dad7d03-1fee-434f-b9b0-39fde646035a" (UID: "8dad7d03-1fee-434f-b9b0-39fde646035a"). InnerVolumeSpecName "var-run-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 22 10:56:26 crc kubenswrapper[4926]: I1122 10:56:26.924040 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8dad7d03-1fee-434f-b9b0-39fde646035a-additional-scripts" (OuterVolumeSpecName: "additional-scripts") pod "8dad7d03-1fee-434f-b9b0-39fde646035a" (UID: "8dad7d03-1fee-434f-b9b0-39fde646035a"). InnerVolumeSpecName "additional-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:56:26 crc kubenswrapper[4926]: I1122 10:56:26.924107 4926 reconciler_common.go:293] "Volume detached for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/8dad7d03-1fee-434f-b9b0-39fde646035a-var-run\") on node \"crc\" DevicePath \"\"" Nov 22 10:56:26 crc kubenswrapper[4926]: I1122 10:56:26.924123 4926 reconciler_common.go:293] "Volume detached for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/8dad7d03-1fee-434f-b9b0-39fde646035a-var-log-ovn\") on node \"crc\" DevicePath \"\"" Nov 22 10:56:26 crc kubenswrapper[4926]: I1122 10:56:26.924135 4926 reconciler_common.go:293] "Volume detached for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/8dad7d03-1fee-434f-b9b0-39fde646035a-var-run-ovn\") on node \"crc\" DevicePath \"\"" Nov 22 10:56:26 crc kubenswrapper[4926]: I1122 10:56:26.924603 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8dad7d03-1fee-434f-b9b0-39fde646035a-scripts" (OuterVolumeSpecName: "scripts") pod "8dad7d03-1fee-434f-b9b0-39fde646035a" (UID: "8dad7d03-1fee-434f-b9b0-39fde646035a"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:56:26 crc kubenswrapper[4926]: I1122 10:56:26.929058 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8dad7d03-1fee-434f-b9b0-39fde646035a-kube-api-access-djfhs" (OuterVolumeSpecName: "kube-api-access-djfhs") pod "8dad7d03-1fee-434f-b9b0-39fde646035a" (UID: "8dad7d03-1fee-434f-b9b0-39fde646035a"). InnerVolumeSpecName "kube-api-access-djfhs". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:56:27 crc kubenswrapper[4926]: I1122 10:56:27.026135 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-djfhs\" (UniqueName: \"kubernetes.io/projected/8dad7d03-1fee-434f-b9b0-39fde646035a-kube-api-access-djfhs\") on node \"crc\" DevicePath \"\"" Nov 22 10:56:27 crc kubenswrapper[4926]: I1122 10:56:27.026170 4926 reconciler_common.go:293] "Volume detached for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/8dad7d03-1fee-434f-b9b0-39fde646035a-additional-scripts\") on node \"crc\" DevicePath \"\"" Nov 22 10:56:27 crc kubenswrapper[4926]: I1122 10:56:27.026182 4926 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/8dad7d03-1fee-434f-b9b0-39fde646035a-scripts\") on node \"crc\" DevicePath \"\"" Nov 22 10:56:27 crc kubenswrapper[4926]: I1122 10:56:27.447710 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-pwfdl-config-k2fqr" event={"ID":"8dad7d03-1fee-434f-b9b0-39fde646035a","Type":"ContainerDied","Data":"b178a9fe763a2d67469268384c531e4ebfe96a266b4f4ea7726bed71580166c2"} Nov 22 10:56:27 crc kubenswrapper[4926]: I1122 10:56:27.447762 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-pwfdl-config-k2fqr" Nov 22 10:56:27 crc kubenswrapper[4926]: I1122 10:56:27.447751 4926 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b178a9fe763a2d67469268384c531e4ebfe96a266b4f4ea7726bed71580166c2" Nov 22 10:56:27 crc kubenswrapper[4926]: I1122 10:56:27.890080 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-controller-pwfdl-config-k2fqr"] Nov 22 10:56:27 crc kubenswrapper[4926]: I1122 10:56:27.895491 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ovn-controller-pwfdl-config-k2fqr"] Nov 22 10:56:28 crc kubenswrapper[4926]: I1122 10:56:28.009247 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-pwfdl-config-pljg5"] Nov 22 10:56:28 crc kubenswrapper[4926]: E1122 10:56:28.009607 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8dad7d03-1fee-434f-b9b0-39fde646035a" containerName="ovn-config" Nov 22 10:56:28 crc kubenswrapper[4926]: I1122 10:56:28.009623 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="8dad7d03-1fee-434f-b9b0-39fde646035a" containerName="ovn-config" Nov 22 10:56:28 crc kubenswrapper[4926]: I1122 10:56:28.009772 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="8dad7d03-1fee-434f-b9b0-39fde646035a" containerName="ovn-config" Nov 22 10:56:28 crc kubenswrapper[4926]: I1122 10:56:28.010320 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-pwfdl-config-pljg5" Nov 22 10:56:28 crc kubenswrapper[4926]: I1122 10:56:28.017426 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-extra-scripts" Nov 22 10:56:28 crc kubenswrapper[4926]: I1122 10:56:28.027455 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-pwfdl-config-pljg5"] Nov 22 10:56:28 crc kubenswrapper[4926]: I1122 10:56:28.146192 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/e9d84c57-f414-493e-9fd6-9a7961999727-additional-scripts\") pod \"ovn-controller-pwfdl-config-pljg5\" (UID: \"e9d84c57-f414-493e-9fd6-9a7961999727\") " pod="openstack/ovn-controller-pwfdl-config-pljg5" Nov 22 10:56:28 crc kubenswrapper[4926]: I1122 10:56:28.146236 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/e9d84c57-f414-493e-9fd6-9a7961999727-var-run\") pod \"ovn-controller-pwfdl-config-pljg5\" (UID: \"e9d84c57-f414-493e-9fd6-9a7961999727\") " pod="openstack/ovn-controller-pwfdl-config-pljg5" Nov 22 10:56:28 crc kubenswrapper[4926]: I1122 10:56:28.146400 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/e9d84c57-f414-493e-9fd6-9a7961999727-var-run-ovn\") pod \"ovn-controller-pwfdl-config-pljg5\" (UID: \"e9d84c57-f414-493e-9fd6-9a7961999727\") " pod="openstack/ovn-controller-pwfdl-config-pljg5" Nov 22 10:56:28 crc kubenswrapper[4926]: I1122 10:56:28.146455 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/e9d84c57-f414-493e-9fd6-9a7961999727-scripts\") pod \"ovn-controller-pwfdl-config-pljg5\" (UID: \"e9d84c57-f414-493e-9fd6-9a7961999727\") " pod="openstack/ovn-controller-pwfdl-config-pljg5" Nov 22 10:56:28 crc kubenswrapper[4926]: I1122 10:56:28.146572 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/e9d84c57-f414-493e-9fd6-9a7961999727-var-log-ovn\") pod \"ovn-controller-pwfdl-config-pljg5\" (UID: \"e9d84c57-f414-493e-9fd6-9a7961999727\") " pod="openstack/ovn-controller-pwfdl-config-pljg5" Nov 22 10:56:28 crc kubenswrapper[4926]: I1122 10:56:28.146692 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-p4xt2\" (UniqueName: \"kubernetes.io/projected/e9d84c57-f414-493e-9fd6-9a7961999727-kube-api-access-p4xt2\") pod \"ovn-controller-pwfdl-config-pljg5\" (UID: \"e9d84c57-f414-493e-9fd6-9a7961999727\") " pod="openstack/ovn-controller-pwfdl-config-pljg5" Nov 22 10:56:28 crc kubenswrapper[4926]: I1122 10:56:28.248058 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/e9d84c57-f414-493e-9fd6-9a7961999727-var-log-ovn\") pod \"ovn-controller-pwfdl-config-pljg5\" (UID: \"e9d84c57-f414-493e-9fd6-9a7961999727\") " pod="openstack/ovn-controller-pwfdl-config-pljg5" Nov 22 10:56:28 crc kubenswrapper[4926]: I1122 10:56:28.248230 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-p4xt2\" (UniqueName: 
\"kubernetes.io/projected/e9d84c57-f414-493e-9fd6-9a7961999727-kube-api-access-p4xt2\") pod \"ovn-controller-pwfdl-config-pljg5\" (UID: \"e9d84c57-f414-493e-9fd6-9a7961999727\") " pod="openstack/ovn-controller-pwfdl-config-pljg5" Nov 22 10:56:28 crc kubenswrapper[4926]: I1122 10:56:28.248268 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/e9d84c57-f414-493e-9fd6-9a7961999727-additional-scripts\") pod \"ovn-controller-pwfdl-config-pljg5\" (UID: \"e9d84c57-f414-493e-9fd6-9a7961999727\") " pod="openstack/ovn-controller-pwfdl-config-pljg5" Nov 22 10:56:28 crc kubenswrapper[4926]: I1122 10:56:28.248295 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/e9d84c57-f414-493e-9fd6-9a7961999727-var-run\") pod \"ovn-controller-pwfdl-config-pljg5\" (UID: \"e9d84c57-f414-493e-9fd6-9a7961999727\") " pod="openstack/ovn-controller-pwfdl-config-pljg5" Nov 22 10:56:28 crc kubenswrapper[4926]: I1122 10:56:28.248339 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/e9d84c57-f414-493e-9fd6-9a7961999727-var-run-ovn\") pod \"ovn-controller-pwfdl-config-pljg5\" (UID: \"e9d84c57-f414-493e-9fd6-9a7961999727\") " pod="openstack/ovn-controller-pwfdl-config-pljg5" Nov 22 10:56:28 crc kubenswrapper[4926]: I1122 10:56:28.248361 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/e9d84c57-f414-493e-9fd6-9a7961999727-scripts\") pod \"ovn-controller-pwfdl-config-pljg5\" (UID: \"e9d84c57-f414-493e-9fd6-9a7961999727\") " pod="openstack/ovn-controller-pwfdl-config-pljg5" Nov 22 10:56:28 crc kubenswrapper[4926]: I1122 10:56:28.248484 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/e9d84c57-f414-493e-9fd6-9a7961999727-var-log-ovn\") pod \"ovn-controller-pwfdl-config-pljg5\" (UID: \"e9d84c57-f414-493e-9fd6-9a7961999727\") " pod="openstack/ovn-controller-pwfdl-config-pljg5" Nov 22 10:56:28 crc kubenswrapper[4926]: I1122 10:56:28.248505 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/e9d84c57-f414-493e-9fd6-9a7961999727-var-run-ovn\") pod \"ovn-controller-pwfdl-config-pljg5\" (UID: \"e9d84c57-f414-493e-9fd6-9a7961999727\") " pod="openstack/ovn-controller-pwfdl-config-pljg5" Nov 22 10:56:28 crc kubenswrapper[4926]: I1122 10:56:28.248554 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/e9d84c57-f414-493e-9fd6-9a7961999727-var-run\") pod \"ovn-controller-pwfdl-config-pljg5\" (UID: \"e9d84c57-f414-493e-9fd6-9a7961999727\") " pod="openstack/ovn-controller-pwfdl-config-pljg5" Nov 22 10:56:28 crc kubenswrapper[4926]: I1122 10:56:28.249513 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/e9d84c57-f414-493e-9fd6-9a7961999727-additional-scripts\") pod \"ovn-controller-pwfdl-config-pljg5\" (UID: \"e9d84c57-f414-493e-9fd6-9a7961999727\") " pod="openstack/ovn-controller-pwfdl-config-pljg5" Nov 22 10:56:28 crc kubenswrapper[4926]: I1122 10:56:28.250967 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: 
\"kubernetes.io/configmap/e9d84c57-f414-493e-9fd6-9a7961999727-scripts\") pod \"ovn-controller-pwfdl-config-pljg5\" (UID: \"e9d84c57-f414-493e-9fd6-9a7961999727\") " pod="openstack/ovn-controller-pwfdl-config-pljg5" Nov 22 10:56:28 crc kubenswrapper[4926]: I1122 10:56:28.266559 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-p4xt2\" (UniqueName: \"kubernetes.io/projected/e9d84c57-f414-493e-9fd6-9a7961999727-kube-api-access-p4xt2\") pod \"ovn-controller-pwfdl-config-pljg5\" (UID: \"e9d84c57-f414-493e-9fd6-9a7961999727\") " pod="openstack/ovn-controller-pwfdl-config-pljg5" Nov 22 10:56:28 crc kubenswrapper[4926]: I1122 10:56:28.335183 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-pwfdl-config-pljg5" Nov 22 10:56:28 crc kubenswrapper[4926]: I1122 10:56:28.593052 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8dad7d03-1fee-434f-b9b0-39fde646035a" path="/var/lib/kubelet/pods/8dad7d03-1fee-434f-b9b0-39fde646035a/volumes" Nov 22 10:56:28 crc kubenswrapper[4926]: I1122 10:56:28.736220 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-controller-pwfdl" Nov 22 10:56:28 crc kubenswrapper[4926]: I1122 10:56:28.785847 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-pwfdl-config-pljg5"] Nov 22 10:56:29 crc kubenswrapper[4926]: I1122 10:56:29.465263 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-pwfdl-config-pljg5" event={"ID":"e9d84c57-f414-493e-9fd6-9a7961999727","Type":"ContainerStarted","Data":"f9e5017f7f19083529fb2fa8015c6f96f24811a8201e239da7bf1a8e43410573"} Nov 22 10:56:29 crc kubenswrapper[4926]: I1122 10:56:29.465874 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-pwfdl-config-pljg5" event={"ID":"e9d84c57-f414-493e-9fd6-9a7961999727","Type":"ContainerStarted","Data":"1cf1606c04ad4f12bf860c0166ba52905c18a9046a7493fe9a22c726aca957d5"} Nov 22 10:56:29 crc kubenswrapper[4926]: I1122 10:56:29.466600 4926 generic.go:334] "Generic (PLEG): container finished" podID="a8055b7b-5391-4ded-a3a7-ca9e86b43ec6" containerID="04a51ac8e1698115caf042a93f113f50deb41ab5145e1e6d37b799736ef2cb52" exitCode=0 Nov 22 10:56:29 crc kubenswrapper[4926]: I1122 10:56:29.466621 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-tgj2k" event={"ID":"a8055b7b-5391-4ded-a3a7-ca9e86b43ec6","Type":"ContainerDied","Data":"04a51ac8e1698115caf042a93f113f50deb41ab5145e1e6d37b799736ef2cb52"} Nov 22 10:56:29 crc kubenswrapper[4926]: I1122 10:56:29.488017 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-pwfdl-config-pljg5" podStartSLOduration=2.487992796 podStartE2EDuration="2.487992796s" podCreationTimestamp="2025-11-22 10:56:27 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 10:56:29.481839049 +0000 UTC m=+1009.783444346" watchObservedRunningTime="2025-11-22 10:56:29.487992796 +0000 UTC m=+1009.789598083" Nov 22 10:56:30 crc kubenswrapper[4926]: I1122 10:56:30.031826 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-6d5b6d6b67-dm4tj" Nov 22 10:56:30 crc kubenswrapper[4926]: I1122 10:56:30.112149 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-b8fbc5445-m7sk6"] Nov 22 10:56:30 crc 
Nov 22 10:56:30 crc kubenswrapper[4926]: I1122 10:56:30.477786 4926 generic.go:334] "Generic (PLEG): container finished" podID="37642f49-f656-4aeb-a359-2c32f4cf7919" containerID="4dff75ddd520308ecb18884a161069763659fcb0c42f8df3481c373a2a0addf3" exitCode=0
Nov 22 10:56:30 crc kubenswrapper[4926]: I1122 10:56:30.477848 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-b8fbc5445-m7sk6" event={"ID":"37642f49-f656-4aeb-a359-2c32f4cf7919","Type":"ContainerDied","Data":"4dff75ddd520308ecb18884a161069763659fcb0c42f8df3481c373a2a0addf3"}
Nov 22 10:56:30 crc kubenswrapper[4926]: I1122 10:56:30.477876 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-b8fbc5445-m7sk6" event={"ID":"37642f49-f656-4aeb-a359-2c32f4cf7919","Type":"ContainerDied","Data":"f75afefa92660dc1ec94f94408356fad207d07e851c38808c7af989d1042f505"}
Nov 22 10:56:30 crc kubenswrapper[4926]: I1122 10:56:30.477906 4926 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f75afefa92660dc1ec94f94408356fad207d07e851c38808c7af989d1042f505"
Nov 22 10:56:30 crc kubenswrapper[4926]: I1122 10:56:30.479481 4926 generic.go:334] "Generic (PLEG): container finished" podID="e9d84c57-f414-493e-9fd6-9a7961999727" containerID="f9e5017f7f19083529fb2fa8015c6f96f24811a8201e239da7bf1a8e43410573" exitCode=0
Nov 22 10:56:30 crc kubenswrapper[4926]: I1122 10:56:30.479593 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-pwfdl-config-pljg5" event={"ID":"e9d84c57-f414-493e-9fd6-9a7961999727","Type":"ContainerDied","Data":"f9e5017f7f19083529fb2fa8015c6f96f24811a8201e239da7bf1a8e43410573"}
Nov 22 10:56:30 crc kubenswrapper[4926]: I1122 10:56:30.560612 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-b8fbc5445-m7sk6"
Nov 22 10:56:30 crc kubenswrapper[4926]: I1122 10:56:30.715865 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/37642f49-f656-4aeb-a359-2c32f4cf7919-dns-svc\") pod \"37642f49-f656-4aeb-a359-2c32f4cf7919\" (UID: \"37642f49-f656-4aeb-a359-2c32f4cf7919\") "
Nov 22 10:56:30 crc kubenswrapper[4926]: I1122 10:56:30.716018 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/37642f49-f656-4aeb-a359-2c32f4cf7919-ovsdbserver-nb\") pod \"37642f49-f656-4aeb-a359-2c32f4cf7919\" (UID: \"37642f49-f656-4aeb-a359-2c32f4cf7919\") "
Nov 22 10:56:30 crc kubenswrapper[4926]: I1122 10:56:30.716074 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/37642f49-f656-4aeb-a359-2c32f4cf7919-ovsdbserver-sb\") pod \"37642f49-f656-4aeb-a359-2c32f4cf7919\" (UID: \"37642f49-f656-4aeb-a359-2c32f4cf7919\") "
Nov 22 10:56:30 crc kubenswrapper[4926]: I1122 10:56:30.716107 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cstqh\" (UniqueName: \"kubernetes.io/projected/37642f49-f656-4aeb-a359-2c32f4cf7919-kube-api-access-cstqh\") pod \"37642f49-f656-4aeb-a359-2c32f4cf7919\" (UID: \"37642f49-f656-4aeb-a359-2c32f4cf7919\") "
Nov 22 10:56:30 crc kubenswrapper[4926]: I1122 10:56:30.716222 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/37642f49-f656-4aeb-a359-2c32f4cf7919-config\") pod \"37642f49-f656-4aeb-a359-2c32f4cf7919\" (UID: \"37642f49-f656-4aeb-a359-2c32f4cf7919\") "
Nov 22 10:56:30 crc kubenswrapper[4926]: I1122 10:56:30.729216 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/37642f49-f656-4aeb-a359-2c32f4cf7919-kube-api-access-cstqh" (OuterVolumeSpecName: "kube-api-access-cstqh") pod "37642f49-f656-4aeb-a359-2c32f4cf7919" (UID: "37642f49-f656-4aeb-a359-2c32f4cf7919"). InnerVolumeSpecName "kube-api-access-cstqh". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 22 10:56:30 crc kubenswrapper[4926]: I1122 10:56:30.764214 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/37642f49-f656-4aeb-a359-2c32f4cf7919-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "37642f49-f656-4aeb-a359-2c32f4cf7919" (UID: "37642f49-f656-4aeb-a359-2c32f4cf7919"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 22 10:56:30 crc kubenswrapper[4926]: I1122 10:56:30.770692 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/37642f49-f656-4aeb-a359-2c32f4cf7919-config" (OuterVolumeSpecName: "config") pod "37642f49-f656-4aeb-a359-2c32f4cf7919" (UID: "37642f49-f656-4aeb-a359-2c32f4cf7919"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 22 10:56:30 crc kubenswrapper[4926]: I1122 10:56:30.771804 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/37642f49-f656-4aeb-a359-2c32f4cf7919-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "37642f49-f656-4aeb-a359-2c32f4cf7919" (UID: "37642f49-f656-4aeb-a359-2c32f4cf7919"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 22 10:56:30 crc kubenswrapper[4926]: I1122 10:56:30.788045 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/37642f49-f656-4aeb-a359-2c32f4cf7919-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "37642f49-f656-4aeb-a359-2c32f4cf7919" (UID: "37642f49-f656-4aeb-a359-2c32f4cf7919"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 22 10:56:30 crc kubenswrapper[4926]: I1122 10:56:30.818459 4926 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/37642f49-f656-4aeb-a359-2c32f4cf7919-config\") on node \"crc\" DevicePath \"\""
Nov 22 10:56:30 crc kubenswrapper[4926]: I1122 10:56:30.818512 4926 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/37642f49-f656-4aeb-a359-2c32f4cf7919-dns-svc\") on node \"crc\" DevicePath \"\""
Nov 22 10:56:30 crc kubenswrapper[4926]: I1122 10:56:30.818524 4926 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/37642f49-f656-4aeb-a359-2c32f4cf7919-ovsdbserver-nb\") on node \"crc\" DevicePath \"\""
Nov 22 10:56:30 crc kubenswrapper[4926]: I1122 10:56:30.818622 4926 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/37642f49-f656-4aeb-a359-2c32f4cf7919-ovsdbserver-sb\") on node \"crc\" DevicePath \"\""
Nov 22 10:56:30 crc kubenswrapper[4926]: I1122 10:56:30.818636 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cstqh\" (UniqueName: \"kubernetes.io/projected/37642f49-f656-4aeb-a359-2c32f4cf7919-kube-api-access-cstqh\") on node \"crc\" DevicePath \"\""
Nov 22 10:56:30 crc kubenswrapper[4926]: I1122 10:56:30.819831 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-sync-tgj2k"
Nov 22 10:56:30 crc kubenswrapper[4926]: I1122 10:56:30.920544 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-q89d5\" (UniqueName: \"kubernetes.io/projected/a8055b7b-5391-4ded-a3a7-ca9e86b43ec6-kube-api-access-q89d5\") pod \"a8055b7b-5391-4ded-a3a7-ca9e86b43ec6\" (UID: \"a8055b7b-5391-4ded-a3a7-ca9e86b43ec6\") "
Nov 22 10:56:30 crc kubenswrapper[4926]: I1122 10:56:30.920633 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/a8055b7b-5391-4ded-a3a7-ca9e86b43ec6-db-sync-config-data\") pod \"a8055b7b-5391-4ded-a3a7-ca9e86b43ec6\" (UID: \"a8055b7b-5391-4ded-a3a7-ca9e86b43ec6\") "
Nov 22 10:56:30 crc kubenswrapper[4926]: I1122 10:56:30.920760 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a8055b7b-5391-4ded-a3a7-ca9e86b43ec6-combined-ca-bundle\") pod \"a8055b7b-5391-4ded-a3a7-ca9e86b43ec6\" (UID: \"a8055b7b-5391-4ded-a3a7-ca9e86b43ec6\") "
Nov 22 10:56:30 crc kubenswrapper[4926]: I1122 10:56:30.920788 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a8055b7b-5391-4ded-a3a7-ca9e86b43ec6-config-data\") pod \"a8055b7b-5391-4ded-a3a7-ca9e86b43ec6\" (UID: \"a8055b7b-5391-4ded-a3a7-ca9e86b43ec6\") "
Nov 22 10:56:30 crc kubenswrapper[4926]: I1122 10:56:30.925321 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a8055b7b-5391-4ded-a3a7-ca9e86b43ec6-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "a8055b7b-5391-4ded-a3a7-ca9e86b43ec6" (UID: "a8055b7b-5391-4ded-a3a7-ca9e86b43ec6"). InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 22 10:56:30 crc kubenswrapper[4926]: I1122 10:56:30.925339 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a8055b7b-5391-4ded-a3a7-ca9e86b43ec6-kube-api-access-q89d5" (OuterVolumeSpecName: "kube-api-access-q89d5") pod "a8055b7b-5391-4ded-a3a7-ca9e86b43ec6" (UID: "a8055b7b-5391-4ded-a3a7-ca9e86b43ec6"). InnerVolumeSpecName "kube-api-access-q89d5". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 22 10:56:30 crc kubenswrapper[4926]: I1122 10:56:30.943792 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a8055b7b-5391-4ded-a3a7-ca9e86b43ec6-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "a8055b7b-5391-4ded-a3a7-ca9e86b43ec6" (UID: "a8055b7b-5391-4ded-a3a7-ca9e86b43ec6"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 22 10:56:30 crc kubenswrapper[4926]: I1122 10:56:30.965553 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a8055b7b-5391-4ded-a3a7-ca9e86b43ec6-config-data" (OuterVolumeSpecName: "config-data") pod "a8055b7b-5391-4ded-a3a7-ca9e86b43ec6" (UID: "a8055b7b-5391-4ded-a3a7-ca9e86b43ec6"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:56:31 crc kubenswrapper[4926]: I1122 10:56:31.022214 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-q89d5\" (UniqueName: \"kubernetes.io/projected/a8055b7b-5391-4ded-a3a7-ca9e86b43ec6-kube-api-access-q89d5\") on node \"crc\" DevicePath \"\"" Nov 22 10:56:31 crc kubenswrapper[4926]: I1122 10:56:31.022253 4926 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/a8055b7b-5391-4ded-a3a7-ca9e86b43ec6-db-sync-config-data\") on node \"crc\" DevicePath \"\"" Nov 22 10:56:31 crc kubenswrapper[4926]: I1122 10:56:31.022262 4926 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a8055b7b-5391-4ded-a3a7-ca9e86b43ec6-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 22 10:56:31 crc kubenswrapper[4926]: I1122 10:56:31.022271 4926 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a8055b7b-5391-4ded-a3a7-ca9e86b43ec6-config-data\") on node \"crc\" DevicePath \"\"" Nov 22 10:56:31 crc kubenswrapper[4926]: I1122 10:56:31.501005 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-tgj2k" event={"ID":"a8055b7b-5391-4ded-a3a7-ca9e86b43ec6","Type":"ContainerDied","Data":"39c5100137684100462ffb361ee59630feab6602f35db5bae7f3da7d4b6fb587"} Nov 22 10:56:31 crc kubenswrapper[4926]: I1122 10:56:31.501050 4926 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="39c5100137684100462ffb361ee59630feab6602f35db5bae7f3da7d4b6fb587" Nov 22 10:56:31 crc kubenswrapper[4926]: I1122 10:56:31.501057 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-b8fbc5445-m7sk6" Nov 22 10:56:31 crc kubenswrapper[4926]: I1122 10:56:31.501100 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-sync-tgj2k" Nov 22 10:56:31 crc kubenswrapper[4926]: I1122 10:56:31.549917 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-b8fbc5445-m7sk6"] Nov 22 10:56:31 crc kubenswrapper[4926]: I1122 10:56:31.558479 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-b8fbc5445-m7sk6"] Nov 22 10:56:31 crc kubenswrapper[4926]: I1122 10:56:31.854453 4926 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-pwfdl-config-pljg5" Nov 22 10:56:31 crc kubenswrapper[4926]: I1122 10:56:31.859426 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-895cf5cf-qk577"] Nov 22 10:56:31 crc kubenswrapper[4926]: E1122 10:56:31.859806 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="37642f49-f656-4aeb-a359-2c32f4cf7919" containerName="init" Nov 22 10:56:31 crc kubenswrapper[4926]: I1122 10:56:31.859824 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="37642f49-f656-4aeb-a359-2c32f4cf7919" containerName="init" Nov 22 10:56:31 crc kubenswrapper[4926]: E1122 10:56:31.859860 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e9d84c57-f414-493e-9fd6-9a7961999727" containerName="ovn-config" Nov 22 10:56:31 crc kubenswrapper[4926]: I1122 10:56:31.859868 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="e9d84c57-f414-493e-9fd6-9a7961999727" containerName="ovn-config" Nov 22 10:56:31 crc kubenswrapper[4926]: E1122 10:56:31.859921 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="37642f49-f656-4aeb-a359-2c32f4cf7919" containerName="dnsmasq-dns" Nov 22 10:56:31 crc kubenswrapper[4926]: I1122 10:56:31.859931 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="37642f49-f656-4aeb-a359-2c32f4cf7919" containerName="dnsmasq-dns" Nov 22 10:56:31 crc kubenswrapper[4926]: E1122 10:56:31.859947 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a8055b7b-5391-4ded-a3a7-ca9e86b43ec6" containerName="glance-db-sync" Nov 22 10:56:31 crc kubenswrapper[4926]: I1122 10:56:31.859955 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="a8055b7b-5391-4ded-a3a7-ca9e86b43ec6" containerName="glance-db-sync" Nov 22 10:56:31 crc kubenswrapper[4926]: I1122 10:56:31.860157 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="a8055b7b-5391-4ded-a3a7-ca9e86b43ec6" containerName="glance-db-sync" Nov 22 10:56:31 crc kubenswrapper[4926]: I1122 10:56:31.860180 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="37642f49-f656-4aeb-a359-2c32f4cf7919" containerName="dnsmasq-dns" Nov 22 10:56:31 crc kubenswrapper[4926]: I1122 10:56:31.860190 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="e9d84c57-f414-493e-9fd6-9a7961999727" containerName="ovn-config" Nov 22 10:56:31 crc kubenswrapper[4926]: I1122 10:56:31.861221 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-895cf5cf-qk577" Nov 22 10:56:31 crc kubenswrapper[4926]: I1122 10:56:31.886941 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-895cf5cf-qk577"] Nov 22 10:56:32 crc kubenswrapper[4926]: I1122 10:56:32.042385 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/e9d84c57-f414-493e-9fd6-9a7961999727-var-run-ovn\") pod \"e9d84c57-f414-493e-9fd6-9a7961999727\" (UID: \"e9d84c57-f414-493e-9fd6-9a7961999727\") " Nov 22 10:56:32 crc kubenswrapper[4926]: I1122 10:56:32.042439 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/e9d84c57-f414-493e-9fd6-9a7961999727-var-log-ovn\") pod \"e9d84c57-f414-493e-9fd6-9a7961999727\" (UID: \"e9d84c57-f414-493e-9fd6-9a7961999727\") " Nov 22 10:56:32 crc kubenswrapper[4926]: I1122 10:56:32.042501 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/e9d84c57-f414-493e-9fd6-9a7961999727-var-run\") pod \"e9d84c57-f414-493e-9fd6-9a7961999727\" (UID: \"e9d84c57-f414-493e-9fd6-9a7961999727\") " Nov 22 10:56:32 crc kubenswrapper[4926]: I1122 10:56:32.042534 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/e9d84c57-f414-493e-9fd6-9a7961999727-additional-scripts\") pod \"e9d84c57-f414-493e-9fd6-9a7961999727\" (UID: \"e9d84c57-f414-493e-9fd6-9a7961999727\") " Nov 22 10:56:32 crc kubenswrapper[4926]: I1122 10:56:32.042579 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/e9d84c57-f414-493e-9fd6-9a7961999727-scripts\") pod \"e9d84c57-f414-493e-9fd6-9a7961999727\" (UID: \"e9d84c57-f414-493e-9fd6-9a7961999727\") " Nov 22 10:56:32 crc kubenswrapper[4926]: I1122 10:56:32.042608 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-p4xt2\" (UniqueName: \"kubernetes.io/projected/e9d84c57-f414-493e-9fd6-9a7961999727-kube-api-access-p4xt2\") pod \"e9d84c57-f414-493e-9fd6-9a7961999727\" (UID: \"e9d84c57-f414-493e-9fd6-9a7961999727\") " Nov 22 10:56:32 crc kubenswrapper[4926]: I1122 10:56:32.042787 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/db52ce28-2f07-4bdf-8c24-d793e49104d8-ovsdbserver-sb\") pod \"dnsmasq-dns-895cf5cf-qk577\" (UID: \"db52ce28-2f07-4bdf-8c24-d793e49104d8\") " pod="openstack/dnsmasq-dns-895cf5cf-qk577" Nov 22 10:56:32 crc kubenswrapper[4926]: I1122 10:56:32.042826 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ssvdc\" (UniqueName: \"kubernetes.io/projected/db52ce28-2f07-4bdf-8c24-d793e49104d8-kube-api-access-ssvdc\") pod \"dnsmasq-dns-895cf5cf-qk577\" (UID: \"db52ce28-2f07-4bdf-8c24-d793e49104d8\") " pod="openstack/dnsmasq-dns-895cf5cf-qk577" Nov 22 10:56:32 crc kubenswrapper[4926]: I1122 10:56:32.042919 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/db52ce28-2f07-4bdf-8c24-d793e49104d8-dns-swift-storage-0\") pod \"dnsmasq-dns-895cf5cf-qk577\" (UID: \"db52ce28-2f07-4bdf-8c24-d793e49104d8\") " 
pod="openstack/dnsmasq-dns-895cf5cf-qk577" Nov 22 10:56:32 crc kubenswrapper[4926]: I1122 10:56:32.042985 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/db52ce28-2f07-4bdf-8c24-d793e49104d8-dns-svc\") pod \"dnsmasq-dns-895cf5cf-qk577\" (UID: \"db52ce28-2f07-4bdf-8c24-d793e49104d8\") " pod="openstack/dnsmasq-dns-895cf5cf-qk577" Nov 22 10:56:32 crc kubenswrapper[4926]: I1122 10:56:32.043019 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/db52ce28-2f07-4bdf-8c24-d793e49104d8-ovsdbserver-nb\") pod \"dnsmasq-dns-895cf5cf-qk577\" (UID: \"db52ce28-2f07-4bdf-8c24-d793e49104d8\") " pod="openstack/dnsmasq-dns-895cf5cf-qk577" Nov 22 10:56:32 crc kubenswrapper[4926]: I1122 10:56:32.043052 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/db52ce28-2f07-4bdf-8c24-d793e49104d8-config\") pod \"dnsmasq-dns-895cf5cf-qk577\" (UID: \"db52ce28-2f07-4bdf-8c24-d793e49104d8\") " pod="openstack/dnsmasq-dns-895cf5cf-qk577" Nov 22 10:56:32 crc kubenswrapper[4926]: I1122 10:56:32.043142 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/e9d84c57-f414-493e-9fd6-9a7961999727-var-run-ovn" (OuterVolumeSpecName: "var-run-ovn") pod "e9d84c57-f414-493e-9fd6-9a7961999727" (UID: "e9d84c57-f414-493e-9fd6-9a7961999727"). InnerVolumeSpecName "var-run-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 22 10:56:32 crc kubenswrapper[4926]: I1122 10:56:32.043175 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/e9d84c57-f414-493e-9fd6-9a7961999727-var-log-ovn" (OuterVolumeSpecName: "var-log-ovn") pod "e9d84c57-f414-493e-9fd6-9a7961999727" (UID: "e9d84c57-f414-493e-9fd6-9a7961999727"). InnerVolumeSpecName "var-log-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 22 10:56:32 crc kubenswrapper[4926]: I1122 10:56:32.043195 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/e9d84c57-f414-493e-9fd6-9a7961999727-var-run" (OuterVolumeSpecName: "var-run") pod "e9d84c57-f414-493e-9fd6-9a7961999727" (UID: "e9d84c57-f414-493e-9fd6-9a7961999727"). InnerVolumeSpecName "var-run". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 22 10:56:32 crc kubenswrapper[4926]: I1122 10:56:32.044209 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e9d84c57-f414-493e-9fd6-9a7961999727-additional-scripts" (OuterVolumeSpecName: "additional-scripts") pod "e9d84c57-f414-493e-9fd6-9a7961999727" (UID: "e9d84c57-f414-493e-9fd6-9a7961999727"). InnerVolumeSpecName "additional-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:56:32 crc kubenswrapper[4926]: I1122 10:56:32.045267 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e9d84c57-f414-493e-9fd6-9a7961999727-scripts" (OuterVolumeSpecName: "scripts") pod "e9d84c57-f414-493e-9fd6-9a7961999727" (UID: "e9d84c57-f414-493e-9fd6-9a7961999727"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:56:32 crc kubenswrapper[4926]: I1122 10:56:32.049516 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e9d84c57-f414-493e-9fd6-9a7961999727-kube-api-access-p4xt2" (OuterVolumeSpecName: "kube-api-access-p4xt2") pod "e9d84c57-f414-493e-9fd6-9a7961999727" (UID: "e9d84c57-f414-493e-9fd6-9a7961999727"). InnerVolumeSpecName "kube-api-access-p4xt2". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:56:32 crc kubenswrapper[4926]: I1122 10:56:32.144060 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/db52ce28-2f07-4bdf-8c24-d793e49104d8-dns-svc\") pod \"dnsmasq-dns-895cf5cf-qk577\" (UID: \"db52ce28-2f07-4bdf-8c24-d793e49104d8\") " pod="openstack/dnsmasq-dns-895cf5cf-qk577" Nov 22 10:56:32 crc kubenswrapper[4926]: I1122 10:56:32.144392 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/db52ce28-2f07-4bdf-8c24-d793e49104d8-ovsdbserver-nb\") pod \"dnsmasq-dns-895cf5cf-qk577\" (UID: \"db52ce28-2f07-4bdf-8c24-d793e49104d8\") " pod="openstack/dnsmasq-dns-895cf5cf-qk577" Nov 22 10:56:32 crc kubenswrapper[4926]: I1122 10:56:32.144595 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/db52ce28-2f07-4bdf-8c24-d793e49104d8-config\") pod \"dnsmasq-dns-895cf5cf-qk577\" (UID: \"db52ce28-2f07-4bdf-8c24-d793e49104d8\") " pod="openstack/dnsmasq-dns-895cf5cf-qk577" Nov 22 10:56:32 crc kubenswrapper[4926]: I1122 10:56:32.144760 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/db52ce28-2f07-4bdf-8c24-d793e49104d8-ovsdbserver-sb\") pod \"dnsmasq-dns-895cf5cf-qk577\" (UID: \"db52ce28-2f07-4bdf-8c24-d793e49104d8\") " pod="openstack/dnsmasq-dns-895cf5cf-qk577" Nov 22 10:56:32 crc kubenswrapper[4926]: I1122 10:56:32.144911 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ssvdc\" (UniqueName: \"kubernetes.io/projected/db52ce28-2f07-4bdf-8c24-d793e49104d8-kube-api-access-ssvdc\") pod \"dnsmasq-dns-895cf5cf-qk577\" (UID: \"db52ce28-2f07-4bdf-8c24-d793e49104d8\") " pod="openstack/dnsmasq-dns-895cf5cf-qk577" Nov 22 10:56:32 crc kubenswrapper[4926]: I1122 10:56:32.145015 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/db52ce28-2f07-4bdf-8c24-d793e49104d8-dns-svc\") pod \"dnsmasq-dns-895cf5cf-qk577\" (UID: \"db52ce28-2f07-4bdf-8c24-d793e49104d8\") " pod="openstack/dnsmasq-dns-895cf5cf-qk577" Nov 22 10:56:32 crc kubenswrapper[4926]: I1122 10:56:32.145167 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/db52ce28-2f07-4bdf-8c24-d793e49104d8-dns-swift-storage-0\") pod \"dnsmasq-dns-895cf5cf-qk577\" (UID: \"db52ce28-2f07-4bdf-8c24-d793e49104d8\") " pod="openstack/dnsmasq-dns-895cf5cf-qk577" Nov 22 10:56:32 crc kubenswrapper[4926]: I1122 10:56:32.145847 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/db52ce28-2f07-4bdf-8c24-d793e49104d8-ovsdbserver-sb\") pod \"dnsmasq-dns-895cf5cf-qk577\" (UID: \"db52ce28-2f07-4bdf-8c24-d793e49104d8\") " 
pod="openstack/dnsmasq-dns-895cf5cf-qk577" Nov 22 10:56:32 crc kubenswrapper[4926]: I1122 10:56:32.146240 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/db52ce28-2f07-4bdf-8c24-d793e49104d8-dns-swift-storage-0\") pod \"dnsmasq-dns-895cf5cf-qk577\" (UID: \"db52ce28-2f07-4bdf-8c24-d793e49104d8\") " pod="openstack/dnsmasq-dns-895cf5cf-qk577" Nov 22 10:56:32 crc kubenswrapper[4926]: I1122 10:56:32.146467 4926 reconciler_common.go:293] "Volume detached for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/e9d84c57-f414-493e-9fd6-9a7961999727-var-run-ovn\") on node \"crc\" DevicePath \"\"" Nov 22 10:56:32 crc kubenswrapper[4926]: I1122 10:56:32.146918 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/db52ce28-2f07-4bdf-8c24-d793e49104d8-config\") pod \"dnsmasq-dns-895cf5cf-qk577\" (UID: \"db52ce28-2f07-4bdf-8c24-d793e49104d8\") " pod="openstack/dnsmasq-dns-895cf5cf-qk577" Nov 22 10:56:32 crc kubenswrapper[4926]: I1122 10:56:32.147350 4926 reconciler_common.go:293] "Volume detached for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/e9d84c57-f414-493e-9fd6-9a7961999727-var-log-ovn\") on node \"crc\" DevicePath \"\"" Nov 22 10:56:32 crc kubenswrapper[4926]: I1122 10:56:32.147450 4926 reconciler_common.go:293] "Volume detached for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/e9d84c57-f414-493e-9fd6-9a7961999727-var-run\") on node \"crc\" DevicePath \"\"" Nov 22 10:56:32 crc kubenswrapper[4926]: I1122 10:56:32.147539 4926 reconciler_common.go:293] "Volume detached for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/e9d84c57-f414-493e-9fd6-9a7961999727-additional-scripts\") on node \"crc\" DevicePath \"\"" Nov 22 10:56:32 crc kubenswrapper[4926]: I1122 10:56:32.147600 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/db52ce28-2f07-4bdf-8c24-d793e49104d8-ovsdbserver-nb\") pod \"dnsmasq-dns-895cf5cf-qk577\" (UID: \"db52ce28-2f07-4bdf-8c24-d793e49104d8\") " pod="openstack/dnsmasq-dns-895cf5cf-qk577" Nov 22 10:56:32 crc kubenswrapper[4926]: I1122 10:56:32.147614 4926 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/e9d84c57-f414-493e-9fd6-9a7961999727-scripts\") on node \"crc\" DevicePath \"\"" Nov 22 10:56:32 crc kubenswrapper[4926]: I1122 10:56:32.147767 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-p4xt2\" (UniqueName: \"kubernetes.io/projected/e9d84c57-f414-493e-9fd6-9a7961999727-kube-api-access-p4xt2\") on node \"crc\" DevicePath \"\"" Nov 22 10:56:32 crc kubenswrapper[4926]: I1122 10:56:32.166078 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ssvdc\" (UniqueName: \"kubernetes.io/projected/db52ce28-2f07-4bdf-8c24-d793e49104d8-kube-api-access-ssvdc\") pod \"dnsmasq-dns-895cf5cf-qk577\" (UID: \"db52ce28-2f07-4bdf-8c24-d793e49104d8\") " pod="openstack/dnsmasq-dns-895cf5cf-qk577" Nov 22 10:56:32 crc kubenswrapper[4926]: I1122 10:56:32.176648 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-895cf5cf-qk577" Nov 22 10:56:32 crc kubenswrapper[4926]: I1122 10:56:32.412729 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-895cf5cf-qk577"] Nov 22 10:56:32 crc kubenswrapper[4926]: W1122 10:56:32.416516 4926 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poddb52ce28_2f07_4bdf_8c24_d793e49104d8.slice/crio-a598667724c79f020a465ab929a93efce9a89703415e680fb164bb1af0810214 WatchSource:0}: Error finding container a598667724c79f020a465ab929a93efce9a89703415e680fb164bb1af0810214: Status 404 returned error can't find the container with id a598667724c79f020a465ab929a93efce9a89703415e680fb164bb1af0810214 Nov 22 10:56:32 crc kubenswrapper[4926]: I1122 10:56:32.516396 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-895cf5cf-qk577" event={"ID":"db52ce28-2f07-4bdf-8c24-d793e49104d8","Type":"ContainerStarted","Data":"a598667724c79f020a465ab929a93efce9a89703415e680fb164bb1af0810214"} Nov 22 10:56:32 crc kubenswrapper[4926]: I1122 10:56:32.527636 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-pwfdl-config-pljg5" event={"ID":"e9d84c57-f414-493e-9fd6-9a7961999727","Type":"ContainerDied","Data":"1cf1606c04ad4f12bf860c0166ba52905c18a9046a7493fe9a22c726aca957d5"} Nov 22 10:56:32 crc kubenswrapper[4926]: I1122 10:56:32.527710 4926 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="1cf1606c04ad4f12bf860c0166ba52905c18a9046a7493fe9a22c726aca957d5" Nov 22 10:56:32 crc kubenswrapper[4926]: I1122 10:56:32.530640 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-pwfdl-config-pljg5" Nov 22 10:56:32 crc kubenswrapper[4926]: I1122 10:56:32.564019 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-controller-pwfdl-config-pljg5"] Nov 22 10:56:32 crc kubenswrapper[4926]: I1122 10:56:32.572805 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ovn-controller-pwfdl-config-pljg5"] Nov 22 10:56:32 crc kubenswrapper[4926]: I1122 10:56:32.620669 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="37642f49-f656-4aeb-a359-2c32f4cf7919" path="/var/lib/kubelet/pods/37642f49-f656-4aeb-a359-2c32f4cf7919/volumes" Nov 22 10:56:32 crc kubenswrapper[4926]: I1122 10:56:32.621695 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e9d84c57-f414-493e-9fd6-9a7961999727" path="/var/lib/kubelet/pods/e9d84c57-f414-493e-9fd6-9a7961999727/volumes" Nov 22 10:56:33 crc kubenswrapper[4926]: I1122 10:56:33.542244 4926 generic.go:334] "Generic (PLEG): container finished" podID="db52ce28-2f07-4bdf-8c24-d793e49104d8" containerID="fe42f298dc9f0227096ee7814c8e92300a986c8b7602efd6144bd8d4b60ece35" exitCode=0 Nov 22 10:56:33 crc kubenswrapper[4926]: I1122 10:56:33.542356 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-895cf5cf-qk577" event={"ID":"db52ce28-2f07-4bdf-8c24-d793e49104d8","Type":"ContainerDied","Data":"fe42f298dc9f0227096ee7814c8e92300a986c8b7602efd6144bd8d4b60ece35"} Nov 22 10:56:34 crc kubenswrapper[4926]: I1122 10:56:34.553020 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-895cf5cf-qk577" event={"ID":"db52ce28-2f07-4bdf-8c24-d793e49104d8","Type":"ContainerStarted","Data":"6113b7fc74d3a90dd151a5da455c660a68b421b636dd0a99fc5c202da42de9bd"} Nov 22 10:56:34 crc 
kubenswrapper[4926]: I1122 10:56:34.553368 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-895cf5cf-qk577" Nov 22 10:56:34 crc kubenswrapper[4926]: I1122 10:56:34.582201 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-895cf5cf-qk577" podStartSLOduration=3.582181564 podStartE2EDuration="3.582181564s" podCreationTimestamp="2025-11-22 10:56:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 10:56:34.57961421 +0000 UTC m=+1014.881219507" watchObservedRunningTime="2025-11-22 10:56:34.582181564 +0000 UTC m=+1014.883786851" Nov 22 10:56:34 crc kubenswrapper[4926]: I1122 10:56:34.597226 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-cell1-server-0" Nov 22 10:56:34 crc kubenswrapper[4926]: I1122 10:56:34.808118 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-server-0" Nov 22 10:56:36 crc kubenswrapper[4926]: I1122 10:56:36.577616 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-db-create-9g6rc"] Nov 22 10:56:36 crc kubenswrapper[4926]: I1122 10:56:36.578968 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-create-9g6rc" Nov 22 10:56:36 crc kubenswrapper[4926]: I1122 10:56:36.601096 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-create-9g6rc"] Nov 22 10:56:36 crc kubenswrapper[4926]: I1122 10:56:36.687811 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-db-create-chmwr"] Nov 22 10:56:36 crc kubenswrapper[4926]: I1122 10:56:36.689336 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-create-chmwr" Nov 22 10:56:36 crc kubenswrapper[4926]: I1122 10:56:36.694981 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-d48c-account-create-update-8kkx2"] Nov 22 10:56:36 crc kubenswrapper[4926]: I1122 10:56:36.696492 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-d48c-account-create-update-8kkx2" Nov 22 10:56:36 crc kubenswrapper[4926]: I1122 10:56:36.706401 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-db-secret" Nov 22 10:56:36 crc kubenswrapper[4926]: I1122 10:56:36.709818 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-create-chmwr"] Nov 22 10:56:36 crc kubenswrapper[4926]: I1122 10:56:36.720410 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-d48c-account-create-update-8kkx2"] Nov 22 10:56:36 crc kubenswrapper[4926]: I1122 10:56:36.720688 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dz97j\" (UniqueName: \"kubernetes.io/projected/dcafcf70-6fc9-4c0c-bc53-8334a5eda59c-kube-api-access-dz97j\") pod \"cinder-db-create-9g6rc\" (UID: \"dcafcf70-6fc9-4c0c-bc53-8334a5eda59c\") " pod="openstack/cinder-db-create-9g6rc" Nov 22 10:56:36 crc kubenswrapper[4926]: I1122 10:56:36.720737 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/dcafcf70-6fc9-4c0c-bc53-8334a5eda59c-operator-scripts\") pod \"cinder-db-create-9g6rc\" (UID: \"dcafcf70-6fc9-4c0c-bc53-8334a5eda59c\") " pod="openstack/cinder-db-create-9g6rc" Nov 22 10:56:36 crc kubenswrapper[4926]: I1122 10:56:36.791135 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-619d-account-create-update-qwqzd"] Nov 22 10:56:36 crc kubenswrapper[4926]: I1122 10:56:36.792402 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-619d-account-create-update-qwqzd" Nov 22 10:56:36 crc kubenswrapper[4926]: I1122 10:56:36.795494 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-db-secret" Nov 22 10:56:36 crc kubenswrapper[4926]: I1122 10:56:36.806850 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-619d-account-create-update-qwqzd"] Nov 22 10:56:36 crc kubenswrapper[4926]: I1122 10:56:36.822814 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dz97j\" (UniqueName: \"kubernetes.io/projected/dcafcf70-6fc9-4c0c-bc53-8334a5eda59c-kube-api-access-dz97j\") pod \"cinder-db-create-9g6rc\" (UID: \"dcafcf70-6fc9-4c0c-bc53-8334a5eda59c\") " pod="openstack/cinder-db-create-9g6rc" Nov 22 10:56:36 crc kubenswrapper[4926]: I1122 10:56:36.822939 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-69s27\" (UniqueName: \"kubernetes.io/projected/66f0276e-33a0-4b96-ae5d-866925c6310a-kube-api-access-69s27\") pod \"barbican-d48c-account-create-update-8kkx2\" (UID: \"66f0276e-33a0-4b96-ae5d-866925c6310a\") " pod="openstack/barbican-d48c-account-create-update-8kkx2" Nov 22 10:56:36 crc kubenswrapper[4926]: I1122 10:56:36.822985 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/dcafcf70-6fc9-4c0c-bc53-8334a5eda59c-operator-scripts\") pod \"cinder-db-create-9g6rc\" (UID: \"dcafcf70-6fc9-4c0c-bc53-8334a5eda59c\") " pod="openstack/cinder-db-create-9g6rc" Nov 22 10:56:36 crc kubenswrapper[4926]: I1122 10:56:36.823015 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: 
\"kubernetes.io/configmap/48b1eac1-fa58-499b-ad3b-f76d66b4f471-operator-scripts\") pod \"barbican-db-create-chmwr\" (UID: \"48b1eac1-fa58-499b-ad3b-f76d66b4f471\") " pod="openstack/barbican-db-create-chmwr" Nov 22 10:56:36 crc kubenswrapper[4926]: I1122 10:56:36.823084 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9tr4l\" (UniqueName: \"kubernetes.io/projected/48b1eac1-fa58-499b-ad3b-f76d66b4f471-kube-api-access-9tr4l\") pod \"barbican-db-create-chmwr\" (UID: \"48b1eac1-fa58-499b-ad3b-f76d66b4f471\") " pod="openstack/barbican-db-create-chmwr" Nov 22 10:56:36 crc kubenswrapper[4926]: I1122 10:56:36.823185 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/66f0276e-33a0-4b96-ae5d-866925c6310a-operator-scripts\") pod \"barbican-d48c-account-create-update-8kkx2\" (UID: \"66f0276e-33a0-4b96-ae5d-866925c6310a\") " pod="openstack/barbican-d48c-account-create-update-8kkx2" Nov 22 10:56:36 crc kubenswrapper[4926]: I1122 10:56:36.824944 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/dcafcf70-6fc9-4c0c-bc53-8334a5eda59c-operator-scripts\") pod \"cinder-db-create-9g6rc\" (UID: \"dcafcf70-6fc9-4c0c-bc53-8334a5eda59c\") " pod="openstack/cinder-db-create-9g6rc" Nov 22 10:56:36 crc kubenswrapper[4926]: I1122 10:56:36.900048 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dz97j\" (UniqueName: \"kubernetes.io/projected/dcafcf70-6fc9-4c0c-bc53-8334a5eda59c-kube-api-access-dz97j\") pod \"cinder-db-create-9g6rc\" (UID: \"dcafcf70-6fc9-4c0c-bc53-8334a5eda59c\") " pod="openstack/cinder-db-create-9g6rc" Nov 22 10:56:36 crc kubenswrapper[4926]: I1122 10:56:36.908699 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-db-create-9g6rc" Nov 22 10:56:36 crc kubenswrapper[4926]: I1122 10:56:36.926994 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/66f0276e-33a0-4b96-ae5d-866925c6310a-operator-scripts\") pod \"barbican-d48c-account-create-update-8kkx2\" (UID: \"66f0276e-33a0-4b96-ae5d-866925c6310a\") " pod="openstack/barbican-d48c-account-create-update-8kkx2" Nov 22 10:56:36 crc kubenswrapper[4926]: I1122 10:56:36.927071 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7a3652ee-cb8e-4f96-8953-ba9f3a4cef79-operator-scripts\") pod \"cinder-619d-account-create-update-qwqzd\" (UID: \"7a3652ee-cb8e-4f96-8953-ba9f3a4cef79\") " pod="openstack/cinder-619d-account-create-update-qwqzd" Nov 22 10:56:36 crc kubenswrapper[4926]: I1122 10:56:36.927112 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-k7dtr\" (UniqueName: \"kubernetes.io/projected/7a3652ee-cb8e-4f96-8953-ba9f3a4cef79-kube-api-access-k7dtr\") pod \"cinder-619d-account-create-update-qwqzd\" (UID: \"7a3652ee-cb8e-4f96-8953-ba9f3a4cef79\") " pod="openstack/cinder-619d-account-create-update-qwqzd" Nov 22 10:56:36 crc kubenswrapper[4926]: I1122 10:56:36.927149 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-69s27\" (UniqueName: \"kubernetes.io/projected/66f0276e-33a0-4b96-ae5d-866925c6310a-kube-api-access-69s27\") pod \"barbican-d48c-account-create-update-8kkx2\" (UID: \"66f0276e-33a0-4b96-ae5d-866925c6310a\") " pod="openstack/barbican-d48c-account-create-update-8kkx2" Nov 22 10:56:36 crc kubenswrapper[4926]: I1122 10:56:36.927172 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/48b1eac1-fa58-499b-ad3b-f76d66b4f471-operator-scripts\") pod \"barbican-db-create-chmwr\" (UID: \"48b1eac1-fa58-499b-ad3b-f76d66b4f471\") " pod="openstack/barbican-db-create-chmwr" Nov 22 10:56:36 crc kubenswrapper[4926]: I1122 10:56:36.927204 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9tr4l\" (UniqueName: \"kubernetes.io/projected/48b1eac1-fa58-499b-ad3b-f76d66b4f471-kube-api-access-9tr4l\") pod \"barbican-db-create-chmwr\" (UID: \"48b1eac1-fa58-499b-ad3b-f76d66b4f471\") " pod="openstack/barbican-db-create-chmwr" Nov 22 10:56:36 crc kubenswrapper[4926]: I1122 10:56:36.928152 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/66f0276e-33a0-4b96-ae5d-866925c6310a-operator-scripts\") pod \"barbican-d48c-account-create-update-8kkx2\" (UID: \"66f0276e-33a0-4b96-ae5d-866925c6310a\") " pod="openstack/barbican-d48c-account-create-update-8kkx2" Nov 22 10:56:36 crc kubenswrapper[4926]: I1122 10:56:36.928773 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/48b1eac1-fa58-499b-ad3b-f76d66b4f471-operator-scripts\") pod \"barbican-db-create-chmwr\" (UID: \"48b1eac1-fa58-499b-ad3b-f76d66b4f471\") " pod="openstack/barbican-db-create-chmwr" Nov 22 10:56:36 crc kubenswrapper[4926]: I1122 10:56:36.955095 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9tr4l\" (UniqueName: 
\"kubernetes.io/projected/48b1eac1-fa58-499b-ad3b-f76d66b4f471-kube-api-access-9tr4l\") pod \"barbican-db-create-chmwr\" (UID: \"48b1eac1-fa58-499b-ad3b-f76d66b4f471\") " pod="openstack/barbican-db-create-chmwr" Nov 22 10:56:36 crc kubenswrapper[4926]: I1122 10:56:36.965656 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-69s27\" (UniqueName: \"kubernetes.io/projected/66f0276e-33a0-4b96-ae5d-866925c6310a-kube-api-access-69s27\") pod \"barbican-d48c-account-create-update-8kkx2\" (UID: \"66f0276e-33a0-4b96-ae5d-866925c6310a\") " pod="openstack/barbican-d48c-account-create-update-8kkx2" Nov 22 10:56:36 crc kubenswrapper[4926]: I1122 10:56:36.995948 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-db-create-v8z5c"] Nov 22 10:56:36 crc kubenswrapper[4926]: I1122 10:56:36.997333 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-v8z5c" Nov 22 10:56:37 crc kubenswrapper[4926]: I1122 10:56:37.001939 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-create-v8z5c"] Nov 22 10:56:37 crc kubenswrapper[4926]: I1122 10:56:37.007527 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-create-chmwr" Nov 22 10:56:37 crc kubenswrapper[4926]: I1122 10:56:37.008918 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-1603-account-create-update-v8zj7"] Nov 22 10:56:37 crc kubenswrapper[4926]: I1122 10:56:37.010460 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-1603-account-create-update-v8zj7" Nov 22 10:56:37 crc kubenswrapper[4926]: I1122 10:56:37.013566 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-d48c-account-create-update-8kkx2" Nov 22 10:56:37 crc kubenswrapper[4926]: I1122 10:56:37.015425 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-1603-account-create-update-v8zj7"] Nov 22 10:56:37 crc kubenswrapper[4926]: I1122 10:56:37.022436 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-db-secret" Nov 22 10:56:37 crc kubenswrapper[4926]: I1122 10:56:37.028961 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7a3652ee-cb8e-4f96-8953-ba9f3a4cef79-operator-scripts\") pod \"cinder-619d-account-create-update-qwqzd\" (UID: \"7a3652ee-cb8e-4f96-8953-ba9f3a4cef79\") " pod="openstack/cinder-619d-account-create-update-qwqzd" Nov 22 10:56:37 crc kubenswrapper[4926]: I1122 10:56:37.029009 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-k7dtr\" (UniqueName: \"kubernetes.io/projected/7a3652ee-cb8e-4f96-8953-ba9f3a4cef79-kube-api-access-k7dtr\") pod \"cinder-619d-account-create-update-qwqzd\" (UID: \"7a3652ee-cb8e-4f96-8953-ba9f3a4cef79\") " pod="openstack/cinder-619d-account-create-update-qwqzd" Nov 22 10:56:37 crc kubenswrapper[4926]: I1122 10:56:37.029822 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7a3652ee-cb8e-4f96-8953-ba9f3a4cef79-operator-scripts\") pod \"cinder-619d-account-create-update-qwqzd\" (UID: \"7a3652ee-cb8e-4f96-8953-ba9f3a4cef79\") " pod="openstack/cinder-619d-account-create-update-qwqzd" Nov 22 10:56:37 crc kubenswrapper[4926]: I1122 10:56:37.055046 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-k7dtr\" (UniqueName: \"kubernetes.io/projected/7a3652ee-cb8e-4f96-8953-ba9f3a4cef79-kube-api-access-k7dtr\") pod \"cinder-619d-account-create-update-qwqzd\" (UID: \"7a3652ee-cb8e-4f96-8953-ba9f3a4cef79\") " pod="openstack/cinder-619d-account-create-update-qwqzd" Nov 22 10:56:37 crc kubenswrapper[4926]: I1122 10:56:37.114127 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-db-sync-lbhdp"] Nov 22 10:56:37 crc kubenswrapper[4926]: I1122 10:56:37.115357 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-db-sync-lbhdp" Nov 22 10:56:37 crc kubenswrapper[4926]: I1122 10:56:37.120819 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Nov 22 10:56:37 crc kubenswrapper[4926]: I1122 10:56:37.121080 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Nov 22 10:56:37 crc kubenswrapper[4926]: I1122 10:56:37.121172 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-kngh7" Nov 22 10:56:37 crc kubenswrapper[4926]: I1122 10:56:37.121196 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Nov 22 10:56:37 crc kubenswrapper[4926]: I1122 10:56:37.130065 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cpfrb\" (UniqueName: \"kubernetes.io/projected/960db37c-8997-460e-9f83-46d08b7597b1-kube-api-access-cpfrb\") pod \"neutron-db-create-v8z5c\" (UID: \"960db37c-8997-460e-9f83-46d08b7597b1\") " pod="openstack/neutron-db-create-v8z5c" Nov 22 10:56:37 crc kubenswrapper[4926]: I1122 10:56:37.130128 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kt9bm\" (UniqueName: \"kubernetes.io/projected/ebe92277-de4f-49bf-a390-a1a649e35b2a-kube-api-access-kt9bm\") pod \"neutron-1603-account-create-update-v8zj7\" (UID: \"ebe92277-de4f-49bf-a390-a1a649e35b2a\") " pod="openstack/neutron-1603-account-create-update-v8zj7" Nov 22 10:56:37 crc kubenswrapper[4926]: I1122 10:56:37.130161 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ebe92277-de4f-49bf-a390-a1a649e35b2a-operator-scripts\") pod \"neutron-1603-account-create-update-v8zj7\" (UID: \"ebe92277-de4f-49bf-a390-a1a649e35b2a\") " pod="openstack/neutron-1603-account-create-update-v8zj7" Nov 22 10:56:37 crc kubenswrapper[4926]: I1122 10:56:37.130208 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/960db37c-8997-460e-9f83-46d08b7597b1-operator-scripts\") pod \"neutron-db-create-v8z5c\" (UID: \"960db37c-8997-460e-9f83-46d08b7597b1\") " pod="openstack/neutron-db-create-v8z5c" Nov 22 10:56:37 crc kubenswrapper[4926]: I1122 10:56:37.143466 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-sync-lbhdp"] Nov 22 10:56:37 crc kubenswrapper[4926]: I1122 10:56:37.150007 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-619d-account-create-update-qwqzd" Nov 22 10:56:37 crc kubenswrapper[4926]: I1122 10:56:37.231918 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/960db37c-8997-460e-9f83-46d08b7597b1-operator-scripts\") pod \"neutron-db-create-v8z5c\" (UID: \"960db37c-8997-460e-9f83-46d08b7597b1\") " pod="openstack/neutron-db-create-v8z5c" Nov 22 10:56:37 crc kubenswrapper[4926]: I1122 10:56:37.232040 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cpfrb\" (UniqueName: \"kubernetes.io/projected/960db37c-8997-460e-9f83-46d08b7597b1-kube-api-access-cpfrb\") pod \"neutron-db-create-v8z5c\" (UID: \"960db37c-8997-460e-9f83-46d08b7597b1\") " pod="openstack/neutron-db-create-v8z5c" Nov 22 10:56:37 crc kubenswrapper[4926]: I1122 10:56:37.232078 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dd927212-5062-4fb3-b93e-6804d57c251c-combined-ca-bundle\") pod \"keystone-db-sync-lbhdp\" (UID: \"dd927212-5062-4fb3-b93e-6804d57c251c\") " pod="openstack/keystone-db-sync-lbhdp" Nov 22 10:56:37 crc kubenswrapper[4926]: I1122 10:56:37.232113 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kt9bm\" (UniqueName: \"kubernetes.io/projected/ebe92277-de4f-49bf-a390-a1a649e35b2a-kube-api-access-kt9bm\") pod \"neutron-1603-account-create-update-v8zj7\" (UID: \"ebe92277-de4f-49bf-a390-a1a649e35b2a\") " pod="openstack/neutron-1603-account-create-update-v8zj7" Nov 22 10:56:37 crc kubenswrapper[4926]: I1122 10:56:37.232135 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/dd927212-5062-4fb3-b93e-6804d57c251c-config-data\") pod \"keystone-db-sync-lbhdp\" (UID: \"dd927212-5062-4fb3-b93e-6804d57c251c\") " pod="openstack/keystone-db-sync-lbhdp" Nov 22 10:56:37 crc kubenswrapper[4926]: I1122 10:56:37.232157 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ebe92277-de4f-49bf-a390-a1a649e35b2a-operator-scripts\") pod \"neutron-1603-account-create-update-v8zj7\" (UID: \"ebe92277-de4f-49bf-a390-a1a649e35b2a\") " pod="openstack/neutron-1603-account-create-update-v8zj7" Nov 22 10:56:37 crc kubenswrapper[4926]: I1122 10:56:37.232179 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7cgxz\" (UniqueName: \"kubernetes.io/projected/dd927212-5062-4fb3-b93e-6804d57c251c-kube-api-access-7cgxz\") pod \"keystone-db-sync-lbhdp\" (UID: \"dd927212-5062-4fb3-b93e-6804d57c251c\") " pod="openstack/keystone-db-sync-lbhdp" Nov 22 10:56:37 crc kubenswrapper[4926]: I1122 10:56:37.232799 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/960db37c-8997-460e-9f83-46d08b7597b1-operator-scripts\") pod \"neutron-db-create-v8z5c\" (UID: \"960db37c-8997-460e-9f83-46d08b7597b1\") " pod="openstack/neutron-db-create-v8z5c" Nov 22 10:56:37 crc kubenswrapper[4926]: I1122 10:56:37.233242 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ebe92277-de4f-49bf-a390-a1a649e35b2a-operator-scripts\") pod 
\"neutron-1603-account-create-update-v8zj7\" (UID: \"ebe92277-de4f-49bf-a390-a1a649e35b2a\") " pod="openstack/neutron-1603-account-create-update-v8zj7" Nov 22 10:56:37 crc kubenswrapper[4926]: I1122 10:56:37.262957 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cpfrb\" (UniqueName: \"kubernetes.io/projected/960db37c-8997-460e-9f83-46d08b7597b1-kube-api-access-cpfrb\") pod \"neutron-db-create-v8z5c\" (UID: \"960db37c-8997-460e-9f83-46d08b7597b1\") " pod="openstack/neutron-db-create-v8z5c" Nov 22 10:56:37 crc kubenswrapper[4926]: I1122 10:56:37.266003 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kt9bm\" (UniqueName: \"kubernetes.io/projected/ebe92277-de4f-49bf-a390-a1a649e35b2a-kube-api-access-kt9bm\") pod \"neutron-1603-account-create-update-v8zj7\" (UID: \"ebe92277-de4f-49bf-a390-a1a649e35b2a\") " pod="openstack/neutron-1603-account-create-update-v8zj7" Nov 22 10:56:37 crc kubenswrapper[4926]: I1122 10:56:37.327544 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-v8z5c" Nov 22 10:56:37 crc kubenswrapper[4926]: I1122 10:56:37.333402 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dd927212-5062-4fb3-b93e-6804d57c251c-combined-ca-bundle\") pod \"keystone-db-sync-lbhdp\" (UID: \"dd927212-5062-4fb3-b93e-6804d57c251c\") " pod="openstack/keystone-db-sync-lbhdp" Nov 22 10:56:37 crc kubenswrapper[4926]: I1122 10:56:37.333462 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/dd927212-5062-4fb3-b93e-6804d57c251c-config-data\") pod \"keystone-db-sync-lbhdp\" (UID: \"dd927212-5062-4fb3-b93e-6804d57c251c\") " pod="openstack/keystone-db-sync-lbhdp" Nov 22 10:56:37 crc kubenswrapper[4926]: I1122 10:56:37.333498 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7cgxz\" (UniqueName: \"kubernetes.io/projected/dd927212-5062-4fb3-b93e-6804d57c251c-kube-api-access-7cgxz\") pod \"keystone-db-sync-lbhdp\" (UID: \"dd927212-5062-4fb3-b93e-6804d57c251c\") " pod="openstack/keystone-db-sync-lbhdp" Nov 22 10:56:37 crc kubenswrapper[4926]: I1122 10:56:37.338011 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/dd927212-5062-4fb3-b93e-6804d57c251c-config-data\") pod \"keystone-db-sync-lbhdp\" (UID: \"dd927212-5062-4fb3-b93e-6804d57c251c\") " pod="openstack/keystone-db-sync-lbhdp" Nov 22 10:56:37 crc kubenswrapper[4926]: I1122 10:56:37.338561 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dd927212-5062-4fb3-b93e-6804d57c251c-combined-ca-bundle\") pod \"keystone-db-sync-lbhdp\" (UID: \"dd927212-5062-4fb3-b93e-6804d57c251c\") " pod="openstack/keystone-db-sync-lbhdp" Nov 22 10:56:37 crc kubenswrapper[4926]: I1122 10:56:37.350577 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7cgxz\" (UniqueName: \"kubernetes.io/projected/dd927212-5062-4fb3-b93e-6804d57c251c-kube-api-access-7cgxz\") pod \"keystone-db-sync-lbhdp\" (UID: \"dd927212-5062-4fb3-b93e-6804d57c251c\") " pod="openstack/keystone-db-sync-lbhdp" Nov 22 10:56:37 crc kubenswrapper[4926]: I1122 10:56:37.416731 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-1603-account-create-update-v8zj7" Nov 22 10:56:37 crc kubenswrapper[4926]: I1122 10:56:37.437734 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-sync-lbhdp" Nov 22 10:56:37 crc kubenswrapper[4926]: I1122 10:56:37.475024 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-create-9g6rc"] Nov 22 10:56:37 crc kubenswrapper[4926]: I1122 10:56:37.583802 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-9g6rc" event={"ID":"dcafcf70-6fc9-4c0c-bc53-8334a5eda59c","Type":"ContainerStarted","Data":"82058a0b4c2d73349a580b2f69a52ab961d2250431ecc37b5faee84f2a420a6e"} Nov 22 10:56:37 crc kubenswrapper[4926]: I1122 10:56:37.631916 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-d48c-account-create-update-8kkx2"] Nov 22 10:56:37 crc kubenswrapper[4926]: I1122 10:56:37.704670 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-619d-account-create-update-qwqzd"] Nov 22 10:56:37 crc kubenswrapper[4926]: W1122 10:56:37.711751 4926 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod7a3652ee_cb8e_4f96_8953_ba9f3a4cef79.slice/crio-b7d831e054b3f4902611ed7d61d0a240e8af9f9fcd369336de5eba83842c9ead WatchSource:0}: Error finding container b7d831e054b3f4902611ed7d61d0a240e8af9f9fcd369336de5eba83842c9ead: Status 404 returned error can't find the container with id b7d831e054b3f4902611ed7d61d0a240e8af9f9fcd369336de5eba83842c9ead Nov 22 10:56:37 crc kubenswrapper[4926]: I1122 10:56:37.753141 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-create-chmwr"] Nov 22 10:56:37 crc kubenswrapper[4926]: I1122 10:56:37.984647 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-create-v8z5c"] Nov 22 10:56:38 crc kubenswrapper[4926]: I1122 10:56:38.027907 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-1603-account-create-update-v8zj7"] Nov 22 10:56:38 crc kubenswrapper[4926]: I1122 10:56:38.043041 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-sync-lbhdp"] Nov 22 10:56:38 crc kubenswrapper[4926]: I1122 10:56:38.598740 4926 generic.go:334] "Generic (PLEG): container finished" podID="dcafcf70-6fc9-4c0c-bc53-8334a5eda59c" containerID="a05ee305fa9670ecd2cec4a8728d5462baa2581f1025b2c32c67ca9fe162b846" exitCode=0 Nov 22 10:56:38 crc kubenswrapper[4926]: I1122 10:56:38.598938 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-9g6rc" event={"ID":"dcafcf70-6fc9-4c0c-bc53-8334a5eda59c","Type":"ContainerDied","Data":"a05ee305fa9670ecd2cec4a8728d5462baa2581f1025b2c32c67ca9fe162b846"} Nov 22 10:56:38 crc kubenswrapper[4926]: I1122 10:56:38.602638 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-1603-account-create-update-v8zj7" event={"ID":"ebe92277-de4f-49bf-a390-a1a649e35b2a","Type":"ContainerStarted","Data":"2afcf3d8cf96a3fa67f4252e2793b4ba5cc99357bd37ac57be630abf687f325b"} Nov 22 10:56:38 crc kubenswrapper[4926]: I1122 10:56:38.602684 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-1603-account-create-update-v8zj7" event={"ID":"ebe92277-de4f-49bf-a390-a1a649e35b2a","Type":"ContainerStarted","Data":"17ea9fce34f1bbcd0fb9bd75bc1d6c7340957571fb2fbc22aece01ab8c84cf3d"} Nov 22 10:56:38 crc kubenswrapper[4926]: I1122 10:56:38.604824 
4926 generic.go:334] "Generic (PLEG): container finished" podID="7a3652ee-cb8e-4f96-8953-ba9f3a4cef79" containerID="8442a9d52ad5c65e7b1a997b242b9e85f5bb066c19e108a3f1539379efbc1f2b" exitCode=0 Nov 22 10:56:38 crc kubenswrapper[4926]: I1122 10:56:38.604878 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-619d-account-create-update-qwqzd" event={"ID":"7a3652ee-cb8e-4f96-8953-ba9f3a4cef79","Type":"ContainerDied","Data":"8442a9d52ad5c65e7b1a997b242b9e85f5bb066c19e108a3f1539379efbc1f2b"} Nov 22 10:56:38 crc kubenswrapper[4926]: I1122 10:56:38.604916 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-619d-account-create-update-qwqzd" event={"ID":"7a3652ee-cb8e-4f96-8953-ba9f3a4cef79","Type":"ContainerStarted","Data":"b7d831e054b3f4902611ed7d61d0a240e8af9f9fcd369336de5eba83842c9ead"} Nov 22 10:56:38 crc kubenswrapper[4926]: I1122 10:56:38.606456 4926 generic.go:334] "Generic (PLEG): container finished" podID="960db37c-8997-460e-9f83-46d08b7597b1" containerID="b0e3bdbb07fe25755c1e52530762c8d9446781e358ae051f6a85f9e88f545126" exitCode=0 Nov 22 10:56:38 crc kubenswrapper[4926]: I1122 10:56:38.606499 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-v8z5c" event={"ID":"960db37c-8997-460e-9f83-46d08b7597b1","Type":"ContainerDied","Data":"b0e3bdbb07fe25755c1e52530762c8d9446781e358ae051f6a85f9e88f545126"} Nov 22 10:56:38 crc kubenswrapper[4926]: I1122 10:56:38.606517 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-v8z5c" event={"ID":"960db37c-8997-460e-9f83-46d08b7597b1","Type":"ContainerStarted","Data":"148ce91ce538ced5947ac1db49f17466ec8d5431f465cc19660da555827349c2"} Nov 22 10:56:38 crc kubenswrapper[4926]: I1122 10:56:38.608035 4926 generic.go:334] "Generic (PLEG): container finished" podID="48b1eac1-fa58-499b-ad3b-f76d66b4f471" containerID="b0a75f89a583a31f06294d77b38484c29b8effc5db0ad2e8c255a7156a8d3c51" exitCode=0 Nov 22 10:56:38 crc kubenswrapper[4926]: I1122 10:56:38.608081 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-chmwr" event={"ID":"48b1eac1-fa58-499b-ad3b-f76d66b4f471","Type":"ContainerDied","Data":"b0a75f89a583a31f06294d77b38484c29b8effc5db0ad2e8c255a7156a8d3c51"} Nov 22 10:56:38 crc kubenswrapper[4926]: I1122 10:56:38.608100 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-chmwr" event={"ID":"48b1eac1-fa58-499b-ad3b-f76d66b4f471","Type":"ContainerStarted","Data":"d208d21632a26ae05b55db0a8596d7dbfcc7f62e99f257b13c4b3e48bcb08b46"} Nov 22 10:56:38 crc kubenswrapper[4926]: I1122 10:56:38.623785 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-lbhdp" event={"ID":"dd927212-5062-4fb3-b93e-6804d57c251c","Type":"ContainerStarted","Data":"311156ab9021f4a2727862ffa1dc8dca8cd339f1e8b66f47cdddc59fa8c9ef2a"} Nov 22 10:56:38 crc kubenswrapper[4926]: I1122 10:56:38.626028 4926 generic.go:334] "Generic (PLEG): container finished" podID="66f0276e-33a0-4b96-ae5d-866925c6310a" containerID="e41d7d056d21ace0640b804051c41cea9d2239f6e9948e5404521e3e489aa701" exitCode=0 Nov 22 10:56:38 crc kubenswrapper[4926]: I1122 10:56:38.626116 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-d48c-account-create-update-8kkx2" event={"ID":"66f0276e-33a0-4b96-ae5d-866925c6310a","Type":"ContainerDied","Data":"e41d7d056d21ace0640b804051c41cea9d2239f6e9948e5404521e3e489aa701"} Nov 22 10:56:38 crc kubenswrapper[4926]: I1122 10:56:38.626311 4926 
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-d48c-account-create-update-8kkx2" event={"ID":"66f0276e-33a0-4b96-ae5d-866925c6310a","Type":"ContainerStarted","Data":"1190f35ee07c247358ac48a44a012c404cb80f746cbc7e6c03b202c0fb794fd9"} Nov 22 10:56:38 crc kubenswrapper[4926]: I1122 10:56:38.685144 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-1603-account-create-update-v8zj7" podStartSLOduration=2.685105121 podStartE2EDuration="2.685105121s" podCreationTimestamp="2025-11-22 10:56:36 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 10:56:38.680642313 +0000 UTC m=+1018.982247600" watchObservedRunningTime="2025-11-22 10:56:38.685105121 +0000 UTC m=+1018.986710408" Nov 22 10:56:39 crc kubenswrapper[4926]: I1122 10:56:39.636078 4926 generic.go:334] "Generic (PLEG): container finished" podID="ebe92277-de4f-49bf-a390-a1a649e35b2a" containerID="2afcf3d8cf96a3fa67f4252e2793b4ba5cc99357bd37ac57be630abf687f325b" exitCode=0 Nov 22 10:56:39 crc kubenswrapper[4926]: I1122 10:56:39.636125 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-1603-account-create-update-v8zj7" event={"ID":"ebe92277-de4f-49bf-a390-a1a649e35b2a","Type":"ContainerDied","Data":"2afcf3d8cf96a3fa67f4252e2793b4ba5cc99357bd37ac57be630abf687f325b"} Nov 22 10:56:42 crc kubenswrapper[4926]: I1122 10:56:42.178053 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-895cf5cf-qk577" Nov 22 10:56:42 crc kubenswrapper[4926]: I1122 10:56:42.241484 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6d5b6d6b67-dm4tj"] Nov 22 10:56:42 crc kubenswrapper[4926]: I1122 10:56:42.241809 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-6d5b6d6b67-dm4tj" podUID="659e8c42-1cfd-49fa-a47c-c9c9f15f8a13" containerName="dnsmasq-dns" containerID="cri-o://e2bddfc966f356d619dfc8e4082a9545f5020cec227d7cb67bd890ad9679ce8f" gracePeriod=10 Nov 22 10:56:42 crc kubenswrapper[4926]: I1122 10:56:42.661598 4926 generic.go:334] "Generic (PLEG): container finished" podID="659e8c42-1cfd-49fa-a47c-c9c9f15f8a13" containerID="e2bddfc966f356d619dfc8e4082a9545f5020cec227d7cb67bd890ad9679ce8f" exitCode=0 Nov 22 10:56:42 crc kubenswrapper[4926]: I1122 10:56:42.661689 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6d5b6d6b67-dm4tj" event={"ID":"659e8c42-1cfd-49fa-a47c-c9c9f15f8a13","Type":"ContainerDied","Data":"e2bddfc966f356d619dfc8e4082a9545f5020cec227d7cb67bd890ad9679ce8f"} Nov 22 10:56:42 crc kubenswrapper[4926]: I1122 10:56:42.901998 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-create-chmwr" Nov 22 10:56:42 crc kubenswrapper[4926]: I1122 10:56:42.907951 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-d48c-account-create-update-8kkx2" Nov 22 10:56:42 crc kubenswrapper[4926]: I1122 10:56:42.951062 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-create-9g6rc" Nov 22 10:56:42 crc kubenswrapper[4926]: I1122 10:56:42.951588 4926 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-1603-account-create-update-v8zj7" Nov 22 10:56:43 crc kubenswrapper[4926]: I1122 10:56:43.017408 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-v8z5c" Nov 22 10:56:43 crc kubenswrapper[4926]: I1122 10:56:43.023558 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-619d-account-create-update-qwqzd" Nov 22 10:56:43 crc kubenswrapper[4926]: I1122 10:56:43.042298 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6d5b6d6b67-dm4tj" Nov 22 10:56:43 crc kubenswrapper[4926]: I1122 10:56:43.050520 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dz97j\" (UniqueName: \"kubernetes.io/projected/dcafcf70-6fc9-4c0c-bc53-8334a5eda59c-kube-api-access-dz97j\") pod \"dcafcf70-6fc9-4c0c-bc53-8334a5eda59c\" (UID: \"dcafcf70-6fc9-4c0c-bc53-8334a5eda59c\") " Nov 22 10:56:43 crc kubenswrapper[4926]: I1122 10:56:43.050770 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/dcafcf70-6fc9-4c0c-bc53-8334a5eda59c-operator-scripts\") pod \"dcafcf70-6fc9-4c0c-bc53-8334a5eda59c\" (UID: \"dcafcf70-6fc9-4c0c-bc53-8334a5eda59c\") " Nov 22 10:56:43 crc kubenswrapper[4926]: I1122 10:56:43.050849 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ebe92277-de4f-49bf-a390-a1a649e35b2a-operator-scripts\") pod \"ebe92277-de4f-49bf-a390-a1a649e35b2a\" (UID: \"ebe92277-de4f-49bf-a390-a1a649e35b2a\") " Nov 22 10:56:43 crc kubenswrapper[4926]: I1122 10:56:43.050986 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9tr4l\" (UniqueName: \"kubernetes.io/projected/48b1eac1-fa58-499b-ad3b-f76d66b4f471-kube-api-access-9tr4l\") pod \"48b1eac1-fa58-499b-ad3b-f76d66b4f471\" (UID: \"48b1eac1-fa58-499b-ad3b-f76d66b4f471\") " Nov 22 10:56:43 crc kubenswrapper[4926]: I1122 10:56:43.051109 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/66f0276e-33a0-4b96-ae5d-866925c6310a-operator-scripts\") pod \"66f0276e-33a0-4b96-ae5d-866925c6310a\" (UID: \"66f0276e-33a0-4b96-ae5d-866925c6310a\") " Nov 22 10:56:43 crc kubenswrapper[4926]: I1122 10:56:43.054515 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-69s27\" (UniqueName: \"kubernetes.io/projected/66f0276e-33a0-4b96-ae5d-866925c6310a-kube-api-access-69s27\") pod \"66f0276e-33a0-4b96-ae5d-866925c6310a\" (UID: \"66f0276e-33a0-4b96-ae5d-866925c6310a\") " Nov 22 10:56:43 crc kubenswrapper[4926]: I1122 10:56:43.054764 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kt9bm\" (UniqueName: \"kubernetes.io/projected/ebe92277-de4f-49bf-a390-a1a649e35b2a-kube-api-access-kt9bm\") pod \"ebe92277-de4f-49bf-a390-a1a649e35b2a\" (UID: \"ebe92277-de4f-49bf-a390-a1a649e35b2a\") " Nov 22 10:56:43 crc kubenswrapper[4926]: I1122 10:56:43.055007 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/48b1eac1-fa58-499b-ad3b-f76d66b4f471-operator-scripts\") pod \"48b1eac1-fa58-499b-ad3b-f76d66b4f471\" (UID: 
\"48b1eac1-fa58-499b-ad3b-f76d66b4f471\") " Nov 22 10:56:43 crc kubenswrapper[4926]: I1122 10:56:43.051865 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/dcafcf70-6fc9-4c0c-bc53-8334a5eda59c-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "dcafcf70-6fc9-4c0c-bc53-8334a5eda59c" (UID: "dcafcf70-6fc9-4c0c-bc53-8334a5eda59c"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:56:43 crc kubenswrapper[4926]: I1122 10:56:43.051925 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ebe92277-de4f-49bf-a390-a1a649e35b2a-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "ebe92277-de4f-49bf-a390-a1a649e35b2a" (UID: "ebe92277-de4f-49bf-a390-a1a649e35b2a"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:56:43 crc kubenswrapper[4926]: I1122 10:56:43.052463 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/66f0276e-33a0-4b96-ae5d-866925c6310a-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "66f0276e-33a0-4b96-ae5d-866925c6310a" (UID: "66f0276e-33a0-4b96-ae5d-866925c6310a"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:56:43 crc kubenswrapper[4926]: I1122 10:56:43.056130 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/48b1eac1-fa58-499b-ad3b-f76d66b4f471-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "48b1eac1-fa58-499b-ad3b-f76d66b4f471" (UID: "48b1eac1-fa58-499b-ad3b-f76d66b4f471"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:56:43 crc kubenswrapper[4926]: I1122 10:56:43.057826 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/dcafcf70-6fc9-4c0c-bc53-8334a5eda59c-kube-api-access-dz97j" (OuterVolumeSpecName: "kube-api-access-dz97j") pod "dcafcf70-6fc9-4c0c-bc53-8334a5eda59c" (UID: "dcafcf70-6fc9-4c0c-bc53-8334a5eda59c"). InnerVolumeSpecName "kube-api-access-dz97j". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:56:43 crc kubenswrapper[4926]: I1122 10:56:43.061347 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ebe92277-de4f-49bf-a390-a1a649e35b2a-kube-api-access-kt9bm" (OuterVolumeSpecName: "kube-api-access-kt9bm") pod "ebe92277-de4f-49bf-a390-a1a649e35b2a" (UID: "ebe92277-de4f-49bf-a390-a1a649e35b2a"). InnerVolumeSpecName "kube-api-access-kt9bm". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:56:43 crc kubenswrapper[4926]: I1122 10:56:43.061369 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/48b1eac1-fa58-499b-ad3b-f76d66b4f471-kube-api-access-9tr4l" (OuterVolumeSpecName: "kube-api-access-9tr4l") pod "48b1eac1-fa58-499b-ad3b-f76d66b4f471" (UID: "48b1eac1-fa58-499b-ad3b-f76d66b4f471"). InnerVolumeSpecName "kube-api-access-9tr4l". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:56:43 crc kubenswrapper[4926]: I1122 10:56:43.061400 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/66f0276e-33a0-4b96-ae5d-866925c6310a-kube-api-access-69s27" (OuterVolumeSpecName: "kube-api-access-69s27") pod "66f0276e-33a0-4b96-ae5d-866925c6310a" (UID: "66f0276e-33a0-4b96-ae5d-866925c6310a"). InnerVolumeSpecName "kube-api-access-69s27". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:56:43 crc kubenswrapper[4926]: I1122 10:56:43.157037 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7a3652ee-cb8e-4f96-8953-ba9f3a4cef79-operator-scripts\") pod \"7a3652ee-cb8e-4f96-8953-ba9f3a4cef79\" (UID: \"7a3652ee-cb8e-4f96-8953-ba9f3a4cef79\") " Nov 22 10:56:43 crc kubenswrapper[4926]: I1122 10:56:43.157111 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cpfrb\" (UniqueName: \"kubernetes.io/projected/960db37c-8997-460e-9f83-46d08b7597b1-kube-api-access-cpfrb\") pod \"960db37c-8997-460e-9f83-46d08b7597b1\" (UID: \"960db37c-8997-460e-9f83-46d08b7597b1\") " Nov 22 10:56:43 crc kubenswrapper[4926]: I1122 10:56:43.157548 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7a3652ee-cb8e-4f96-8953-ba9f3a4cef79-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "7a3652ee-cb8e-4f96-8953-ba9f3a4cef79" (UID: "7a3652ee-cb8e-4f96-8953-ba9f3a4cef79"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:56:43 crc kubenswrapper[4926]: I1122 10:56:43.157689 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/659e8c42-1cfd-49fa-a47c-c9c9f15f8a13-ovsdbserver-sb\") pod \"659e8c42-1cfd-49fa-a47c-c9c9f15f8a13\" (UID: \"659e8c42-1cfd-49fa-a47c-c9c9f15f8a13\") " Nov 22 10:56:43 crc kubenswrapper[4926]: I1122 10:56:43.157720 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/659e8c42-1cfd-49fa-a47c-c9c9f15f8a13-dns-swift-storage-0\") pod \"659e8c42-1cfd-49fa-a47c-c9c9f15f8a13\" (UID: \"659e8c42-1cfd-49fa-a47c-c9c9f15f8a13\") " Nov 22 10:56:43 crc kubenswrapper[4926]: I1122 10:56:43.157801 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xx9vf\" (UniqueName: \"kubernetes.io/projected/659e8c42-1cfd-49fa-a47c-c9c9f15f8a13-kube-api-access-xx9vf\") pod \"659e8c42-1cfd-49fa-a47c-c9c9f15f8a13\" (UID: \"659e8c42-1cfd-49fa-a47c-c9c9f15f8a13\") " Nov 22 10:56:43 crc kubenswrapper[4926]: I1122 10:56:43.157871 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/659e8c42-1cfd-49fa-a47c-c9c9f15f8a13-ovsdbserver-nb\") pod \"659e8c42-1cfd-49fa-a47c-c9c9f15f8a13\" (UID: \"659e8c42-1cfd-49fa-a47c-c9c9f15f8a13\") " Nov 22 10:56:43 crc kubenswrapper[4926]: I1122 10:56:43.157917 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/960db37c-8997-460e-9f83-46d08b7597b1-operator-scripts\") pod \"960db37c-8997-460e-9f83-46d08b7597b1\" (UID: \"960db37c-8997-460e-9f83-46d08b7597b1\") " Nov 22 10:56:43 crc kubenswrapper[4926]: I1122 10:56:43.157958 
4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/659e8c42-1cfd-49fa-a47c-c9c9f15f8a13-config\") pod \"659e8c42-1cfd-49fa-a47c-c9c9f15f8a13\" (UID: \"659e8c42-1cfd-49fa-a47c-c9c9f15f8a13\") " Nov 22 10:56:43 crc kubenswrapper[4926]: I1122 10:56:43.157984 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-k7dtr\" (UniqueName: \"kubernetes.io/projected/7a3652ee-cb8e-4f96-8953-ba9f3a4cef79-kube-api-access-k7dtr\") pod \"7a3652ee-cb8e-4f96-8953-ba9f3a4cef79\" (UID: \"7a3652ee-cb8e-4f96-8953-ba9f3a4cef79\") " Nov 22 10:56:43 crc kubenswrapper[4926]: I1122 10:56:43.158009 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/659e8c42-1cfd-49fa-a47c-c9c9f15f8a13-dns-svc\") pod \"659e8c42-1cfd-49fa-a47c-c9c9f15f8a13\" (UID: \"659e8c42-1cfd-49fa-a47c-c9c9f15f8a13\") " Nov 22 10:56:43 crc kubenswrapper[4926]: I1122 10:56:43.158392 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/960db37c-8997-460e-9f83-46d08b7597b1-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "960db37c-8997-460e-9f83-46d08b7597b1" (UID: "960db37c-8997-460e-9f83-46d08b7597b1"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:56:43 crc kubenswrapper[4926]: I1122 10:56:43.158493 4926 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/66f0276e-33a0-4b96-ae5d-866925c6310a-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 22 10:56:43 crc kubenswrapper[4926]: I1122 10:56:43.158512 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-69s27\" (UniqueName: \"kubernetes.io/projected/66f0276e-33a0-4b96-ae5d-866925c6310a-kube-api-access-69s27\") on node \"crc\" DevicePath \"\"" Nov 22 10:56:43 crc kubenswrapper[4926]: I1122 10:56:43.158526 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kt9bm\" (UniqueName: \"kubernetes.io/projected/ebe92277-de4f-49bf-a390-a1a649e35b2a-kube-api-access-kt9bm\") on node \"crc\" DevicePath \"\"" Nov 22 10:56:43 crc kubenswrapper[4926]: I1122 10:56:43.158538 4926 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/960db37c-8997-460e-9f83-46d08b7597b1-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 22 10:56:43 crc kubenswrapper[4926]: I1122 10:56:43.158549 4926 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/48b1eac1-fa58-499b-ad3b-f76d66b4f471-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 22 10:56:43 crc kubenswrapper[4926]: I1122 10:56:43.158560 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dz97j\" (UniqueName: \"kubernetes.io/projected/dcafcf70-6fc9-4c0c-bc53-8334a5eda59c-kube-api-access-dz97j\") on node \"crc\" DevicePath \"\"" Nov 22 10:56:43 crc kubenswrapper[4926]: I1122 10:56:43.158571 4926 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/dcafcf70-6fc9-4c0c-bc53-8334a5eda59c-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 22 10:56:43 crc kubenswrapper[4926]: I1122 10:56:43.158582 4926 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: 
\"kubernetes.io/configmap/ebe92277-de4f-49bf-a390-a1a649e35b2a-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 22 10:56:43 crc kubenswrapper[4926]: I1122 10:56:43.158594 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9tr4l\" (UniqueName: \"kubernetes.io/projected/48b1eac1-fa58-499b-ad3b-f76d66b4f471-kube-api-access-9tr4l\") on node \"crc\" DevicePath \"\"" Nov 22 10:56:43 crc kubenswrapper[4926]: I1122 10:56:43.158604 4926 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7a3652ee-cb8e-4f96-8953-ba9f3a4cef79-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 22 10:56:43 crc kubenswrapper[4926]: I1122 10:56:43.162779 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7a3652ee-cb8e-4f96-8953-ba9f3a4cef79-kube-api-access-k7dtr" (OuterVolumeSpecName: "kube-api-access-k7dtr") pod "7a3652ee-cb8e-4f96-8953-ba9f3a4cef79" (UID: "7a3652ee-cb8e-4f96-8953-ba9f3a4cef79"). InnerVolumeSpecName "kube-api-access-k7dtr". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:56:43 crc kubenswrapper[4926]: I1122 10:56:43.163569 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/659e8c42-1cfd-49fa-a47c-c9c9f15f8a13-kube-api-access-xx9vf" (OuterVolumeSpecName: "kube-api-access-xx9vf") pod "659e8c42-1cfd-49fa-a47c-c9c9f15f8a13" (UID: "659e8c42-1cfd-49fa-a47c-c9c9f15f8a13"). InnerVolumeSpecName "kube-api-access-xx9vf". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:56:43 crc kubenswrapper[4926]: I1122 10:56:43.167247 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/960db37c-8997-460e-9f83-46d08b7597b1-kube-api-access-cpfrb" (OuterVolumeSpecName: "kube-api-access-cpfrb") pod "960db37c-8997-460e-9f83-46d08b7597b1" (UID: "960db37c-8997-460e-9f83-46d08b7597b1"). InnerVolumeSpecName "kube-api-access-cpfrb". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:56:43 crc kubenswrapper[4926]: I1122 10:56:43.204336 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/659e8c42-1cfd-49fa-a47c-c9c9f15f8a13-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "659e8c42-1cfd-49fa-a47c-c9c9f15f8a13" (UID: "659e8c42-1cfd-49fa-a47c-c9c9f15f8a13"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:56:43 crc kubenswrapper[4926]: I1122 10:56:43.209216 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/659e8c42-1cfd-49fa-a47c-c9c9f15f8a13-config" (OuterVolumeSpecName: "config") pod "659e8c42-1cfd-49fa-a47c-c9c9f15f8a13" (UID: "659e8c42-1cfd-49fa-a47c-c9c9f15f8a13"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:56:43 crc kubenswrapper[4926]: I1122 10:56:43.210605 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/659e8c42-1cfd-49fa-a47c-c9c9f15f8a13-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "659e8c42-1cfd-49fa-a47c-c9c9f15f8a13" (UID: "659e8c42-1cfd-49fa-a47c-c9c9f15f8a13"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:56:43 crc kubenswrapper[4926]: I1122 10:56:43.215960 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/659e8c42-1cfd-49fa-a47c-c9c9f15f8a13-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "659e8c42-1cfd-49fa-a47c-c9c9f15f8a13" (UID: "659e8c42-1cfd-49fa-a47c-c9c9f15f8a13"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:56:43 crc kubenswrapper[4926]: I1122 10:56:43.217508 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/659e8c42-1cfd-49fa-a47c-c9c9f15f8a13-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "659e8c42-1cfd-49fa-a47c-c9c9f15f8a13" (UID: "659e8c42-1cfd-49fa-a47c-c9c9f15f8a13"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:56:43 crc kubenswrapper[4926]: I1122 10:56:43.260227 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xx9vf\" (UniqueName: \"kubernetes.io/projected/659e8c42-1cfd-49fa-a47c-c9c9f15f8a13-kube-api-access-xx9vf\") on node \"crc\" DevicePath \"\"" Nov 22 10:56:43 crc kubenswrapper[4926]: I1122 10:56:43.260264 4926 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/659e8c42-1cfd-49fa-a47c-c9c9f15f8a13-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 22 10:56:43 crc kubenswrapper[4926]: I1122 10:56:43.260280 4926 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/659e8c42-1cfd-49fa-a47c-c9c9f15f8a13-config\") on node \"crc\" DevicePath \"\"" Nov 22 10:56:43 crc kubenswrapper[4926]: I1122 10:56:43.260292 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-k7dtr\" (UniqueName: \"kubernetes.io/projected/7a3652ee-cb8e-4f96-8953-ba9f3a4cef79-kube-api-access-k7dtr\") on node \"crc\" DevicePath \"\"" Nov 22 10:56:43 crc kubenswrapper[4926]: I1122 10:56:43.260303 4926 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/659e8c42-1cfd-49fa-a47c-c9c9f15f8a13-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 22 10:56:43 crc kubenswrapper[4926]: I1122 10:56:43.260317 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cpfrb\" (UniqueName: \"kubernetes.io/projected/960db37c-8997-460e-9f83-46d08b7597b1-kube-api-access-cpfrb\") on node \"crc\" DevicePath \"\"" Nov 22 10:56:43 crc kubenswrapper[4926]: I1122 10:56:43.260328 4926 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/659e8c42-1cfd-49fa-a47c-c9c9f15f8a13-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 22 10:56:43 crc kubenswrapper[4926]: I1122 10:56:43.260337 4926 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/659e8c42-1cfd-49fa-a47c-c9c9f15f8a13-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Nov 22 10:56:43 crc kubenswrapper[4926]: I1122 10:56:43.671682 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-9g6rc" event={"ID":"dcafcf70-6fc9-4c0c-bc53-8334a5eda59c","Type":"ContainerDied","Data":"82058a0b4c2d73349a580b2f69a52ab961d2250431ecc37b5faee84f2a420a6e"} Nov 22 10:56:43 crc kubenswrapper[4926]: I1122 10:56:43.672052 4926 pod_container_deletor.go:80] "Container not found in pod's containers" 
containerID="82058a0b4c2d73349a580b2f69a52ab961d2250431ecc37b5faee84f2a420a6e" Nov 22 10:56:43 crc kubenswrapper[4926]: I1122 10:56:43.672217 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-create-9g6rc" Nov 22 10:56:43 crc kubenswrapper[4926]: I1122 10:56:43.681778 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-1603-account-create-update-v8zj7" event={"ID":"ebe92277-de4f-49bf-a390-a1a649e35b2a","Type":"ContainerDied","Data":"17ea9fce34f1bbcd0fb9bd75bc1d6c7340957571fb2fbc22aece01ab8c84cf3d"} Nov 22 10:56:43 crc kubenswrapper[4926]: I1122 10:56:43.681824 4926 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="17ea9fce34f1bbcd0fb9bd75bc1d6c7340957571fb2fbc22aece01ab8c84cf3d" Nov 22 10:56:43 crc kubenswrapper[4926]: I1122 10:56:43.681903 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-1603-account-create-update-v8zj7" Nov 22 10:56:43 crc kubenswrapper[4926]: I1122 10:56:43.685585 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-619d-account-create-update-qwqzd" event={"ID":"7a3652ee-cb8e-4f96-8953-ba9f3a4cef79","Type":"ContainerDied","Data":"b7d831e054b3f4902611ed7d61d0a240e8af9f9fcd369336de5eba83842c9ead"} Nov 22 10:56:43 crc kubenswrapper[4926]: I1122 10:56:43.685649 4926 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b7d831e054b3f4902611ed7d61d0a240e8af9f9fcd369336de5eba83842c9ead" Nov 22 10:56:43 crc kubenswrapper[4926]: I1122 10:56:43.685722 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-619d-account-create-update-qwqzd" Nov 22 10:56:43 crc kubenswrapper[4926]: I1122 10:56:43.693462 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6d5b6d6b67-dm4tj" event={"ID":"659e8c42-1cfd-49fa-a47c-c9c9f15f8a13","Type":"ContainerDied","Data":"a3d1d175300df2ee51edbc898fa9c92d460feaf9be3ad96c23d17e6eee41a8e7"} Nov 22 10:56:43 crc kubenswrapper[4926]: I1122 10:56:43.693526 4926 scope.go:117] "RemoveContainer" containerID="e2bddfc966f356d619dfc8e4082a9545f5020cec227d7cb67bd890ad9679ce8f" Nov 22 10:56:43 crc kubenswrapper[4926]: I1122 10:56:43.693666 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6d5b6d6b67-dm4tj" Nov 22 10:56:43 crc kubenswrapper[4926]: I1122 10:56:43.696506 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-v8z5c" event={"ID":"960db37c-8997-460e-9f83-46d08b7597b1","Type":"ContainerDied","Data":"148ce91ce538ced5947ac1db49f17466ec8d5431f465cc19660da555827349c2"} Nov 22 10:56:43 crc kubenswrapper[4926]: I1122 10:56:43.698378 4926 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="148ce91ce538ced5947ac1db49f17466ec8d5431f465cc19660da555827349c2" Nov 22 10:56:43 crc kubenswrapper[4926]: I1122 10:56:43.698534 4926 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-db-create-v8z5c" Nov 22 10:56:43 crc kubenswrapper[4926]: I1122 10:56:43.711802 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-chmwr" event={"ID":"48b1eac1-fa58-499b-ad3b-f76d66b4f471","Type":"ContainerDied","Data":"d208d21632a26ae05b55db0a8596d7dbfcc7f62e99f257b13c4b3e48bcb08b46"} Nov 22 10:56:43 crc kubenswrapper[4926]: I1122 10:56:43.711838 4926 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="d208d21632a26ae05b55db0a8596d7dbfcc7f62e99f257b13c4b3e48bcb08b46" Nov 22 10:56:43 crc kubenswrapper[4926]: I1122 10:56:43.711947 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-create-chmwr" Nov 22 10:56:43 crc kubenswrapper[4926]: I1122 10:56:43.754200 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-lbhdp" event={"ID":"dd927212-5062-4fb3-b93e-6804d57c251c","Type":"ContainerStarted","Data":"ce0a4d5f5326f1f2b474d106d6f352c77b72bc88d3b0fffb6a0e5b959fb7d238"} Nov 22 10:56:43 crc kubenswrapper[4926]: I1122 10:56:43.759176 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-d48c-account-create-update-8kkx2" event={"ID":"66f0276e-33a0-4b96-ae5d-866925c6310a","Type":"ContainerDied","Data":"1190f35ee07c247358ac48a44a012c404cb80f746cbc7e6c03b202c0fb794fd9"} Nov 22 10:56:43 crc kubenswrapper[4926]: I1122 10:56:43.759226 4926 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="1190f35ee07c247358ac48a44a012c404cb80f746cbc7e6c03b202c0fb794fd9" Nov 22 10:56:43 crc kubenswrapper[4926]: I1122 10:56:43.759306 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-d48c-account-create-update-8kkx2" Nov 22 10:56:43 crc kubenswrapper[4926]: I1122 10:56:43.780791 4926 scope.go:117] "RemoveContainer" containerID="79bc475f2e0f231025dd4ae0b1d044857c501d86cf24cd40d645b4071a2e0558" Nov 22 10:56:43 crc kubenswrapper[4926]: I1122 10:56:43.780950 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6d5b6d6b67-dm4tj"] Nov 22 10:56:43 crc kubenswrapper[4926]: I1122 10:56:43.787180 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-6d5b6d6b67-dm4tj"] Nov 22 10:56:43 crc kubenswrapper[4926]: I1122 10:56:43.813408 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-db-sync-lbhdp" podStartSLOduration=2.137604056 podStartE2EDuration="6.813391649s" podCreationTimestamp="2025-11-22 10:56:37 +0000 UTC" firstStartedPulling="2025-11-22 10:56:38.059256983 +0000 UTC m=+1018.360862270" lastFinishedPulling="2025-11-22 10:56:42.735044566 +0000 UTC m=+1023.036649863" observedRunningTime="2025-11-22 10:56:43.80854039 +0000 UTC m=+1024.110145677" watchObservedRunningTime="2025-11-22 10:56:43.813391649 +0000 UTC m=+1024.114996936" Nov 22 10:56:44 crc kubenswrapper[4926]: I1122 10:56:44.597774 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="659e8c42-1cfd-49fa-a47c-c9c9f15f8a13" path="/var/lib/kubelet/pods/659e8c42-1cfd-49fa-a47c-c9c9f15f8a13/volumes" Nov 22 10:56:45 crc kubenswrapper[4926]: I1122 10:56:45.780071 4926 generic.go:334] "Generic (PLEG): container finished" podID="dd927212-5062-4fb3-b93e-6804d57c251c" containerID="ce0a4d5f5326f1f2b474d106d6f352c77b72bc88d3b0fffb6a0e5b959fb7d238" exitCode=0 Nov 22 10:56:45 crc kubenswrapper[4926]: I1122 10:56:45.780167 4926 kubelet.go:2453] 
"SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-lbhdp" event={"ID":"dd927212-5062-4fb3-b93e-6804d57c251c","Type":"ContainerDied","Data":"ce0a4d5f5326f1f2b474d106d6f352c77b72bc88d3b0fffb6a0e5b959fb7d238"} Nov 22 10:56:47 crc kubenswrapper[4926]: I1122 10:56:47.150780 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-sync-lbhdp" Nov 22 10:56:47 crc kubenswrapper[4926]: I1122 10:56:47.235712 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/dd927212-5062-4fb3-b93e-6804d57c251c-config-data\") pod \"dd927212-5062-4fb3-b93e-6804d57c251c\" (UID: \"dd927212-5062-4fb3-b93e-6804d57c251c\") " Nov 22 10:56:47 crc kubenswrapper[4926]: I1122 10:56:47.235901 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7cgxz\" (UniqueName: \"kubernetes.io/projected/dd927212-5062-4fb3-b93e-6804d57c251c-kube-api-access-7cgxz\") pod \"dd927212-5062-4fb3-b93e-6804d57c251c\" (UID: \"dd927212-5062-4fb3-b93e-6804d57c251c\") " Nov 22 10:56:47 crc kubenswrapper[4926]: I1122 10:56:47.235931 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dd927212-5062-4fb3-b93e-6804d57c251c-combined-ca-bundle\") pod \"dd927212-5062-4fb3-b93e-6804d57c251c\" (UID: \"dd927212-5062-4fb3-b93e-6804d57c251c\") " Nov 22 10:56:47 crc kubenswrapper[4926]: I1122 10:56:47.242030 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/dd927212-5062-4fb3-b93e-6804d57c251c-kube-api-access-7cgxz" (OuterVolumeSpecName: "kube-api-access-7cgxz") pod "dd927212-5062-4fb3-b93e-6804d57c251c" (UID: "dd927212-5062-4fb3-b93e-6804d57c251c"). InnerVolumeSpecName "kube-api-access-7cgxz". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:56:47 crc kubenswrapper[4926]: I1122 10:56:47.262122 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/dd927212-5062-4fb3-b93e-6804d57c251c-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "dd927212-5062-4fb3-b93e-6804d57c251c" (UID: "dd927212-5062-4fb3-b93e-6804d57c251c"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:56:47 crc kubenswrapper[4926]: I1122 10:56:47.279269 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/dd927212-5062-4fb3-b93e-6804d57c251c-config-data" (OuterVolumeSpecName: "config-data") pod "dd927212-5062-4fb3-b93e-6804d57c251c" (UID: "dd927212-5062-4fb3-b93e-6804d57c251c"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:56:47 crc kubenswrapper[4926]: I1122 10:56:47.337103 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7cgxz\" (UniqueName: \"kubernetes.io/projected/dd927212-5062-4fb3-b93e-6804d57c251c-kube-api-access-7cgxz\") on node \"crc\" DevicePath \"\"" Nov 22 10:56:47 crc kubenswrapper[4926]: I1122 10:56:47.337141 4926 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dd927212-5062-4fb3-b93e-6804d57c251c-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 22 10:56:47 crc kubenswrapper[4926]: I1122 10:56:47.337150 4926 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/dd927212-5062-4fb3-b93e-6804d57c251c-config-data\") on node \"crc\" DevicePath \"\"" Nov 22 10:56:47 crc kubenswrapper[4926]: I1122 10:56:47.805178 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-lbhdp" event={"ID":"dd927212-5062-4fb3-b93e-6804d57c251c","Type":"ContainerDied","Data":"311156ab9021f4a2727862ffa1dc8dca8cd339f1e8b66f47cdddc59fa8c9ef2a"} Nov 22 10:56:47 crc kubenswrapper[4926]: I1122 10:56:47.805470 4926 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="311156ab9021f4a2727862ffa1dc8dca8cd339f1e8b66f47cdddc59fa8c9ef2a" Nov 22 10:56:47 crc kubenswrapper[4926]: I1122 10:56:47.805246 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-sync-lbhdp" Nov 22 10:56:48 crc kubenswrapper[4926]: I1122 10:56:48.404610 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-6c9c9f998c-26q99"] Nov 22 10:56:48 crc kubenswrapper[4926]: E1122 10:56:48.405113 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="659e8c42-1cfd-49fa-a47c-c9c9f15f8a13" containerName="dnsmasq-dns" Nov 22 10:56:48 crc kubenswrapper[4926]: I1122 10:56:48.405133 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="659e8c42-1cfd-49fa-a47c-c9c9f15f8a13" containerName="dnsmasq-dns" Nov 22 10:56:48 crc kubenswrapper[4926]: E1122 10:56:48.405148 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dcafcf70-6fc9-4c0c-bc53-8334a5eda59c" containerName="mariadb-database-create" Nov 22 10:56:48 crc kubenswrapper[4926]: I1122 10:56:48.405155 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="dcafcf70-6fc9-4c0c-bc53-8334a5eda59c" containerName="mariadb-database-create" Nov 22 10:56:48 crc kubenswrapper[4926]: E1122 10:56:48.405173 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ebe92277-de4f-49bf-a390-a1a649e35b2a" containerName="mariadb-account-create-update" Nov 22 10:56:48 crc kubenswrapper[4926]: I1122 10:56:48.405180 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="ebe92277-de4f-49bf-a390-a1a649e35b2a" containerName="mariadb-account-create-update" Nov 22 10:56:48 crc kubenswrapper[4926]: E1122 10:56:48.405193 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7a3652ee-cb8e-4f96-8953-ba9f3a4cef79" containerName="mariadb-account-create-update" Nov 22 10:56:48 crc kubenswrapper[4926]: I1122 10:56:48.405200 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="7a3652ee-cb8e-4f96-8953-ba9f3a4cef79" containerName="mariadb-account-create-update" Nov 22 10:56:48 crc kubenswrapper[4926]: E1122 10:56:48.405212 4926 cpu_manager.go:410] "RemoveStaleState: removing container" 
podUID="dd927212-5062-4fb3-b93e-6804d57c251c" containerName="keystone-db-sync" Nov 22 10:56:48 crc kubenswrapper[4926]: I1122 10:56:48.405219 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="dd927212-5062-4fb3-b93e-6804d57c251c" containerName="keystone-db-sync" Nov 22 10:56:48 crc kubenswrapper[4926]: E1122 10:56:48.405230 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="48b1eac1-fa58-499b-ad3b-f76d66b4f471" containerName="mariadb-database-create" Nov 22 10:56:48 crc kubenswrapper[4926]: I1122 10:56:48.405238 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="48b1eac1-fa58-499b-ad3b-f76d66b4f471" containerName="mariadb-database-create" Nov 22 10:56:48 crc kubenswrapper[4926]: E1122 10:56:48.405258 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="659e8c42-1cfd-49fa-a47c-c9c9f15f8a13" containerName="init" Nov 22 10:56:48 crc kubenswrapper[4926]: I1122 10:56:48.405265 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="659e8c42-1cfd-49fa-a47c-c9c9f15f8a13" containerName="init" Nov 22 10:56:48 crc kubenswrapper[4926]: E1122 10:56:48.405273 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="66f0276e-33a0-4b96-ae5d-866925c6310a" containerName="mariadb-account-create-update" Nov 22 10:56:48 crc kubenswrapper[4926]: I1122 10:56:48.405280 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="66f0276e-33a0-4b96-ae5d-866925c6310a" containerName="mariadb-account-create-update" Nov 22 10:56:48 crc kubenswrapper[4926]: E1122 10:56:48.405302 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="960db37c-8997-460e-9f83-46d08b7597b1" containerName="mariadb-database-create" Nov 22 10:56:48 crc kubenswrapper[4926]: I1122 10:56:48.405309 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="960db37c-8997-460e-9f83-46d08b7597b1" containerName="mariadb-database-create" Nov 22 10:56:48 crc kubenswrapper[4926]: I1122 10:56:48.405541 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="dcafcf70-6fc9-4c0c-bc53-8334a5eda59c" containerName="mariadb-database-create" Nov 22 10:56:48 crc kubenswrapper[4926]: I1122 10:56:48.405566 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="48b1eac1-fa58-499b-ad3b-f76d66b4f471" containerName="mariadb-database-create" Nov 22 10:56:48 crc kubenswrapper[4926]: I1122 10:56:48.405575 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="66f0276e-33a0-4b96-ae5d-866925c6310a" containerName="mariadb-account-create-update" Nov 22 10:56:48 crc kubenswrapper[4926]: I1122 10:56:48.405585 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="dd927212-5062-4fb3-b93e-6804d57c251c" containerName="keystone-db-sync" Nov 22 10:56:48 crc kubenswrapper[4926]: I1122 10:56:48.405602 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="7a3652ee-cb8e-4f96-8953-ba9f3a4cef79" containerName="mariadb-account-create-update" Nov 22 10:56:48 crc kubenswrapper[4926]: I1122 10:56:48.405611 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="659e8c42-1cfd-49fa-a47c-c9c9f15f8a13" containerName="dnsmasq-dns" Nov 22 10:56:48 crc kubenswrapper[4926]: I1122 10:56:48.405621 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="ebe92277-de4f-49bf-a390-a1a649e35b2a" containerName="mariadb-account-create-update" Nov 22 10:56:48 crc kubenswrapper[4926]: I1122 10:56:48.405633 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="960db37c-8997-460e-9f83-46d08b7597b1" 
containerName="mariadb-database-create" Nov 22 10:56:48 crc kubenswrapper[4926]: I1122 10:56:48.406748 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6c9c9f998c-26q99" Nov 22 10:56:48 crc kubenswrapper[4926]: I1122 10:56:48.415358 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6c9c9f998c-26q99"] Nov 22 10:56:48 crc kubenswrapper[4926]: I1122 10:56:48.465309 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-bootstrap-g4n9v"] Nov 22 10:56:48 crc kubenswrapper[4926]: I1122 10:56:48.472167 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-g4n9v" Nov 22 10:56:48 crc kubenswrapper[4926]: I1122 10:56:48.483107 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Nov 22 10:56:48 crc kubenswrapper[4926]: I1122 10:56:48.483279 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Nov 22 10:56:48 crc kubenswrapper[4926]: I1122 10:56:48.483552 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Nov 22 10:56:48 crc kubenswrapper[4926]: I1122 10:56:48.483717 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"osp-secret" Nov 22 10:56:48 crc kubenswrapper[4926]: I1122 10:56:48.483740 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-kngh7" Nov 22 10:56:48 crc kubenswrapper[4926]: I1122 10:56:48.497556 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-g4n9v"] Nov 22 10:56:48 crc kubenswrapper[4926]: I1122 10:56:48.554819 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/e6683f47-f70a-4631-a355-802d689e6d96-ovsdbserver-nb\") pod \"dnsmasq-dns-6c9c9f998c-26q99\" (UID: \"e6683f47-f70a-4631-a355-802d689e6d96\") " pod="openstack/dnsmasq-dns-6c9c9f998c-26q99" Nov 22 10:56:48 crc kubenswrapper[4926]: I1122 10:56:48.554963 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-c64jc\" (UniqueName: \"kubernetes.io/projected/e6683f47-f70a-4631-a355-802d689e6d96-kube-api-access-c64jc\") pod \"dnsmasq-dns-6c9c9f998c-26q99\" (UID: \"e6683f47-f70a-4631-a355-802d689e6d96\") " pod="openstack/dnsmasq-dns-6c9c9f998c-26q99" Nov 22 10:56:48 crc kubenswrapper[4926]: I1122 10:56:48.555005 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e6683f47-f70a-4631-a355-802d689e6d96-dns-svc\") pod \"dnsmasq-dns-6c9c9f998c-26q99\" (UID: \"e6683f47-f70a-4631-a355-802d689e6d96\") " pod="openstack/dnsmasq-dns-6c9c9f998c-26q99" Nov 22 10:56:48 crc kubenswrapper[4926]: I1122 10:56:48.555060 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/e6683f47-f70a-4631-a355-802d689e6d96-dns-swift-storage-0\") pod \"dnsmasq-dns-6c9c9f998c-26q99\" (UID: \"e6683f47-f70a-4631-a355-802d689e6d96\") " pod="openstack/dnsmasq-dns-6c9c9f998c-26q99" Nov 22 10:56:48 crc kubenswrapper[4926]: I1122 10:56:48.555094 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: 
\"kubernetes.io/configmap/e6683f47-f70a-4631-a355-802d689e6d96-ovsdbserver-sb\") pod \"dnsmasq-dns-6c9c9f998c-26q99\" (UID: \"e6683f47-f70a-4631-a355-802d689e6d96\") " pod="openstack/dnsmasq-dns-6c9c9f998c-26q99" Nov 22 10:56:48 crc kubenswrapper[4926]: I1122 10:56:48.555137 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e6683f47-f70a-4631-a355-802d689e6d96-config\") pod \"dnsmasq-dns-6c9c9f998c-26q99\" (UID: \"e6683f47-f70a-4631-a355-802d689e6d96\") " pod="openstack/dnsmasq-dns-6c9c9f998c-26q99" Nov 22 10:56:48 crc kubenswrapper[4926]: I1122 10:56:48.640972 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/horizon-659f95cfc-dgn4q"] Nov 22 10:56:48 crc kubenswrapper[4926]: I1122 10:56:48.645796 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-659f95cfc-dgn4q" Nov 22 10:56:48 crc kubenswrapper[4926]: I1122 10:56:48.650341 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"horizon-horizon-dockercfg-w5qrd" Nov 22 10:56:48 crc kubenswrapper[4926]: I1122 10:56:48.650431 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"horizon" Nov 22 10:56:48 crc kubenswrapper[4926]: I1122 10:56:48.650584 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"horizon-scripts" Nov 22 10:56:48 crc kubenswrapper[4926]: I1122 10:56:48.650844 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"horizon-config-data" Nov 22 10:56:48 crc kubenswrapper[4926]: I1122 10:56:48.656043 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0561d7cf-82b0-4059-b43f-b6d278d7dfc4-config-data\") pod \"keystone-bootstrap-g4n9v\" (UID: \"0561d7cf-82b0-4059-b43f-b6d278d7dfc4\") " pod="openstack/keystone-bootstrap-g4n9v" Nov 22 10:56:48 crc kubenswrapper[4926]: I1122 10:56:48.656091 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0561d7cf-82b0-4059-b43f-b6d278d7dfc4-scripts\") pod \"keystone-bootstrap-g4n9v\" (UID: \"0561d7cf-82b0-4059-b43f-b6d278d7dfc4\") " pod="openstack/keystone-bootstrap-g4n9v" Nov 22 10:56:48 crc kubenswrapper[4926]: I1122 10:56:48.656111 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/0561d7cf-82b0-4059-b43f-b6d278d7dfc4-fernet-keys\") pod \"keystone-bootstrap-g4n9v\" (UID: \"0561d7cf-82b0-4059-b43f-b6d278d7dfc4\") " pod="openstack/keystone-bootstrap-g4n9v" Nov 22 10:56:48 crc kubenswrapper[4926]: I1122 10:56:48.656135 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e6683f47-f70a-4631-a355-802d689e6d96-config\") pod \"dnsmasq-dns-6c9c9f998c-26q99\" (UID: \"e6683f47-f70a-4631-a355-802d689e6d96\") " pod="openstack/dnsmasq-dns-6c9c9f998c-26q99" Nov 22 10:56:48 crc kubenswrapper[4926]: I1122 10:56:48.656168 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0561d7cf-82b0-4059-b43f-b6d278d7dfc4-combined-ca-bundle\") pod \"keystone-bootstrap-g4n9v\" (UID: \"0561d7cf-82b0-4059-b43f-b6d278d7dfc4\") " pod="openstack/keystone-bootstrap-g4n9v" Nov 22 
10:56:48 crc kubenswrapper[4926]: I1122 10:56:48.656193 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/e6683f47-f70a-4631-a355-802d689e6d96-ovsdbserver-nb\") pod \"dnsmasq-dns-6c9c9f998c-26q99\" (UID: \"e6683f47-f70a-4631-a355-802d689e6d96\") " pod="openstack/dnsmasq-dns-6c9c9f998c-26q99" Nov 22 10:56:48 crc kubenswrapper[4926]: I1122 10:56:48.656221 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mqtgl\" (UniqueName: \"kubernetes.io/projected/0561d7cf-82b0-4059-b43f-b6d278d7dfc4-kube-api-access-mqtgl\") pod \"keystone-bootstrap-g4n9v\" (UID: \"0561d7cf-82b0-4059-b43f-b6d278d7dfc4\") " pod="openstack/keystone-bootstrap-g4n9v" Nov 22 10:56:48 crc kubenswrapper[4926]: I1122 10:56:48.656273 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-c64jc\" (UniqueName: \"kubernetes.io/projected/e6683f47-f70a-4631-a355-802d689e6d96-kube-api-access-c64jc\") pod \"dnsmasq-dns-6c9c9f998c-26q99\" (UID: \"e6683f47-f70a-4631-a355-802d689e6d96\") " pod="openstack/dnsmasq-dns-6c9c9f998c-26q99" Nov 22 10:56:48 crc kubenswrapper[4926]: I1122 10:56:48.656300 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e6683f47-f70a-4631-a355-802d689e6d96-dns-svc\") pod \"dnsmasq-dns-6c9c9f998c-26q99\" (UID: \"e6683f47-f70a-4631-a355-802d689e6d96\") " pod="openstack/dnsmasq-dns-6c9c9f998c-26q99" Nov 22 10:56:48 crc kubenswrapper[4926]: I1122 10:56:48.656324 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/0561d7cf-82b0-4059-b43f-b6d278d7dfc4-credential-keys\") pod \"keystone-bootstrap-g4n9v\" (UID: \"0561d7cf-82b0-4059-b43f-b6d278d7dfc4\") " pod="openstack/keystone-bootstrap-g4n9v" Nov 22 10:56:48 crc kubenswrapper[4926]: I1122 10:56:48.656398 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/e6683f47-f70a-4631-a355-802d689e6d96-dns-swift-storage-0\") pod \"dnsmasq-dns-6c9c9f998c-26q99\" (UID: \"e6683f47-f70a-4631-a355-802d689e6d96\") " pod="openstack/dnsmasq-dns-6c9c9f998c-26q99" Nov 22 10:56:48 crc kubenswrapper[4926]: I1122 10:56:48.656437 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/e6683f47-f70a-4631-a355-802d689e6d96-ovsdbserver-sb\") pod \"dnsmasq-dns-6c9c9f998c-26q99\" (UID: \"e6683f47-f70a-4631-a355-802d689e6d96\") " pod="openstack/dnsmasq-dns-6c9c9f998c-26q99" Nov 22 10:56:48 crc kubenswrapper[4926]: I1122 10:56:48.657228 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/e6683f47-f70a-4631-a355-802d689e6d96-dns-swift-storage-0\") pod \"dnsmasq-dns-6c9c9f998c-26q99\" (UID: \"e6683f47-f70a-4631-a355-802d689e6d96\") " pod="openstack/dnsmasq-dns-6c9c9f998c-26q99" Nov 22 10:56:48 crc kubenswrapper[4926]: I1122 10:56:48.657245 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/e6683f47-f70a-4631-a355-802d689e6d96-ovsdbserver-sb\") pod \"dnsmasq-dns-6c9c9f998c-26q99\" (UID: \"e6683f47-f70a-4631-a355-802d689e6d96\") " pod="openstack/dnsmasq-dns-6c9c9f998c-26q99" Nov 22 10:56:48 crc 
kubenswrapper[4926]: I1122 10:56:48.657969 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/e6683f47-f70a-4631-a355-802d689e6d96-ovsdbserver-nb\") pod \"dnsmasq-dns-6c9c9f998c-26q99\" (UID: \"e6683f47-f70a-4631-a355-802d689e6d96\") " pod="openstack/dnsmasq-dns-6c9c9f998c-26q99" Nov 22 10:56:48 crc kubenswrapper[4926]: I1122 10:56:48.657984 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e6683f47-f70a-4631-a355-802d689e6d96-dns-svc\") pod \"dnsmasq-dns-6c9c9f998c-26q99\" (UID: \"e6683f47-f70a-4631-a355-802d689e6d96\") " pod="openstack/dnsmasq-dns-6c9c9f998c-26q99" Nov 22 10:56:48 crc kubenswrapper[4926]: I1122 10:56:48.658022 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e6683f47-f70a-4631-a355-802d689e6d96-config\") pod \"dnsmasq-dns-6c9c9f998c-26q99\" (UID: \"e6683f47-f70a-4631-a355-802d689e6d96\") " pod="openstack/dnsmasq-dns-6c9c9f998c-26q99" Nov 22 10:56:48 crc kubenswrapper[4926]: I1122 10:56:48.662670 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 22 10:56:48 crc kubenswrapper[4926]: I1122 10:56:48.665139 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 22 10:56:48 crc kubenswrapper[4926]: I1122 10:56:48.673581 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 22 10:56:48 crc kubenswrapper[4926]: I1122 10:56:48.673904 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 22 10:56:48 crc kubenswrapper[4926]: I1122 10:56:48.676861 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-659f95cfc-dgn4q"] Nov 22 10:56:48 crc kubenswrapper[4926]: I1122 10:56:48.690320 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 22 10:56:48 crc kubenswrapper[4926]: I1122 10:56:48.694776 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-c64jc\" (UniqueName: \"kubernetes.io/projected/e6683f47-f70a-4631-a355-802d689e6d96-kube-api-access-c64jc\") pod \"dnsmasq-dns-6c9c9f998c-26q99\" (UID: \"e6683f47-f70a-4631-a355-802d689e6d96\") " pod="openstack/dnsmasq-dns-6c9c9f998c-26q99" Nov 22 10:56:48 crc kubenswrapper[4926]: I1122 10:56:48.730520 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6c9c9f998c-26q99" Nov 22 10:56:48 crc kubenswrapper[4926]: I1122 10:56:48.757847 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0561d7cf-82b0-4059-b43f-b6d278d7dfc4-config-data\") pod \"keystone-bootstrap-g4n9v\" (UID: \"0561d7cf-82b0-4059-b43f-b6d278d7dfc4\") " pod="openstack/keystone-bootstrap-g4n9v" Nov 22 10:56:48 crc kubenswrapper[4926]: I1122 10:56:48.757989 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0561d7cf-82b0-4059-b43f-b6d278d7dfc4-scripts\") pod \"keystone-bootstrap-g4n9v\" (UID: \"0561d7cf-82b0-4059-b43f-b6d278d7dfc4\") " pod="openstack/keystone-bootstrap-g4n9v" Nov 22 10:56:48 crc kubenswrapper[4926]: I1122 10:56:48.758020 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/0561d7cf-82b0-4059-b43f-b6d278d7dfc4-fernet-keys\") pod \"keystone-bootstrap-g4n9v\" (UID: \"0561d7cf-82b0-4059-b43f-b6d278d7dfc4\") " pod="openstack/keystone-bootstrap-g4n9v" Nov 22 10:56:48 crc kubenswrapper[4926]: I1122 10:56:48.758144 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0561d7cf-82b0-4059-b43f-b6d278d7dfc4-combined-ca-bundle\") pod \"keystone-bootstrap-g4n9v\" (UID: \"0561d7cf-82b0-4059-b43f-b6d278d7dfc4\") " pod="openstack/keystone-bootstrap-g4n9v" Nov 22 10:56:48 crc kubenswrapper[4926]: I1122 10:56:48.758217 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-l2jbl\" (UniqueName: \"kubernetes.io/projected/29fecf58-02ca-475e-9394-6205a9cdc086-kube-api-access-l2jbl\") pod \"horizon-659f95cfc-dgn4q\" (UID: \"29fecf58-02ca-475e-9394-6205a9cdc086\") " pod="openstack/horizon-659f95cfc-dgn4q" Nov 22 10:56:48 crc kubenswrapper[4926]: I1122 10:56:48.758244 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/29fecf58-02ca-475e-9394-6205a9cdc086-horizon-secret-key\") pod \"horizon-659f95cfc-dgn4q\" (UID: \"29fecf58-02ca-475e-9394-6205a9cdc086\") " pod="openstack/horizon-659f95cfc-dgn4q" Nov 22 10:56:48 crc kubenswrapper[4926]: I1122 10:56:48.758292 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mqtgl\" (UniqueName: \"kubernetes.io/projected/0561d7cf-82b0-4059-b43f-b6d278d7dfc4-kube-api-access-mqtgl\") pod \"keystone-bootstrap-g4n9v\" (UID: \"0561d7cf-82b0-4059-b43f-b6d278d7dfc4\") " pod="openstack/keystone-bootstrap-g4n9v" Nov 22 10:56:48 crc kubenswrapper[4926]: I1122 10:56:48.758345 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/29fecf58-02ca-475e-9394-6205a9cdc086-config-data\") pod \"horizon-659f95cfc-dgn4q\" (UID: \"29fecf58-02ca-475e-9394-6205a9cdc086\") " pod="openstack/horizon-659f95cfc-dgn4q" Nov 22 10:56:48 crc kubenswrapper[4926]: I1122 10:56:48.758410 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/29fecf58-02ca-475e-9394-6205a9cdc086-scripts\") pod \"horizon-659f95cfc-dgn4q\" (UID: \"29fecf58-02ca-475e-9394-6205a9cdc086\") " pod="openstack/horizon-659f95cfc-dgn4q" Nov 
22 10:56:48 crc kubenswrapper[4926]: I1122 10:56:48.758488 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/29fecf58-02ca-475e-9394-6205a9cdc086-logs\") pod \"horizon-659f95cfc-dgn4q\" (UID: \"29fecf58-02ca-475e-9394-6205a9cdc086\") " pod="openstack/horizon-659f95cfc-dgn4q" Nov 22 10:56:48 crc kubenswrapper[4926]: I1122 10:56:48.758539 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/0561d7cf-82b0-4059-b43f-b6d278d7dfc4-credential-keys\") pod \"keystone-bootstrap-g4n9v\" (UID: \"0561d7cf-82b0-4059-b43f-b6d278d7dfc4\") " pod="openstack/keystone-bootstrap-g4n9v" Nov 22 10:56:48 crc kubenswrapper[4926]: I1122 10:56:48.778308 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0561d7cf-82b0-4059-b43f-b6d278d7dfc4-scripts\") pod \"keystone-bootstrap-g4n9v\" (UID: \"0561d7cf-82b0-4059-b43f-b6d278d7dfc4\") " pod="openstack/keystone-bootstrap-g4n9v" Nov 22 10:56:48 crc kubenswrapper[4926]: I1122 10:56:48.778865 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0561d7cf-82b0-4059-b43f-b6d278d7dfc4-combined-ca-bundle\") pod \"keystone-bootstrap-g4n9v\" (UID: \"0561d7cf-82b0-4059-b43f-b6d278d7dfc4\") " pod="openstack/keystone-bootstrap-g4n9v" Nov 22 10:56:48 crc kubenswrapper[4926]: I1122 10:56:48.779345 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0561d7cf-82b0-4059-b43f-b6d278d7dfc4-config-data\") pod \"keystone-bootstrap-g4n9v\" (UID: \"0561d7cf-82b0-4059-b43f-b6d278d7dfc4\") " pod="openstack/keystone-bootstrap-g4n9v" Nov 22 10:56:48 crc kubenswrapper[4926]: I1122 10:56:48.789749 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/0561d7cf-82b0-4059-b43f-b6d278d7dfc4-credential-keys\") pod \"keystone-bootstrap-g4n9v\" (UID: \"0561d7cf-82b0-4059-b43f-b6d278d7dfc4\") " pod="openstack/keystone-bootstrap-g4n9v" Nov 22 10:56:48 crc kubenswrapper[4926]: I1122 10:56:48.790289 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/0561d7cf-82b0-4059-b43f-b6d278d7dfc4-fernet-keys\") pod \"keystone-bootstrap-g4n9v\" (UID: \"0561d7cf-82b0-4059-b43f-b6d278d7dfc4\") " pod="openstack/keystone-bootstrap-g4n9v" Nov 22 10:56:48 crc kubenswrapper[4926]: I1122 10:56:48.812629 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mqtgl\" (UniqueName: \"kubernetes.io/projected/0561d7cf-82b0-4059-b43f-b6d278d7dfc4-kube-api-access-mqtgl\") pod \"keystone-bootstrap-g4n9v\" (UID: \"0561d7cf-82b0-4059-b43f-b6d278d7dfc4\") " pod="openstack/keystone-bootstrap-g4n9v" Nov 22 10:56:48 crc kubenswrapper[4926]: I1122 10:56:48.814850 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-db-sync-fh2gk"] Nov 22 10:56:48 crc kubenswrapper[4926]: I1122 10:56:48.816589 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-db-sync-fh2gk" Nov 22 10:56:48 crc kubenswrapper[4926]: I1122 10:56:48.819160 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-cinder-dockercfg-cq97l" Nov 22 10:56:48 crc kubenswrapper[4926]: I1122 10:56:48.819364 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scripts" Nov 22 10:56:48 crc kubenswrapper[4926]: I1122 10:56:48.819786 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-config-data" Nov 22 10:56:48 crc kubenswrapper[4926]: I1122 10:56:48.852756 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-sync-fh2gk"] Nov 22 10:56:48 crc kubenswrapper[4926]: I1122 10:56:48.862662 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6xm5s\" (UniqueName: \"kubernetes.io/projected/9ebf564c-b5b8-40b7-8899-fc953a485d4d-kube-api-access-6xm5s\") pod \"ceilometer-0\" (UID: \"9ebf564c-b5b8-40b7-8899-fc953a485d4d\") " pod="openstack/ceilometer-0" Nov 22 10:56:48 crc kubenswrapper[4926]: I1122 10:56:48.862697 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9ebf564c-b5b8-40b7-8899-fc953a485d4d-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"9ebf564c-b5b8-40b7-8899-fc953a485d4d\") " pod="openstack/ceilometer-0" Nov 22 10:56:48 crc kubenswrapper[4926]: I1122 10:56:48.862719 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/9ebf564c-b5b8-40b7-8899-fc953a485d4d-run-httpd\") pod \"ceilometer-0\" (UID: \"9ebf564c-b5b8-40b7-8899-fc953a485d4d\") " pod="openstack/ceilometer-0" Nov 22 10:56:48 crc kubenswrapper[4926]: I1122 10:56:48.862745 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-l2jbl\" (UniqueName: \"kubernetes.io/projected/29fecf58-02ca-475e-9394-6205a9cdc086-kube-api-access-l2jbl\") pod \"horizon-659f95cfc-dgn4q\" (UID: \"29fecf58-02ca-475e-9394-6205a9cdc086\") " pod="openstack/horizon-659f95cfc-dgn4q" Nov 22 10:56:48 crc kubenswrapper[4926]: I1122 10:56:48.862773 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/29fecf58-02ca-475e-9394-6205a9cdc086-horizon-secret-key\") pod \"horizon-659f95cfc-dgn4q\" (UID: \"29fecf58-02ca-475e-9394-6205a9cdc086\") " pod="openstack/horizon-659f95cfc-dgn4q" Nov 22 10:56:48 crc kubenswrapper[4926]: I1122 10:56:48.862791 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9ebf564c-b5b8-40b7-8899-fc953a485d4d-config-data\") pod \"ceilometer-0\" (UID: \"9ebf564c-b5b8-40b7-8899-fc953a485d4d\") " pod="openstack/ceilometer-0" Nov 22 10:56:48 crc kubenswrapper[4926]: I1122 10:56:48.862813 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/9ebf564c-b5b8-40b7-8899-fc953a485d4d-log-httpd\") pod \"ceilometer-0\" (UID: \"9ebf564c-b5b8-40b7-8899-fc953a485d4d\") " pod="openstack/ceilometer-0" Nov 22 10:56:48 crc kubenswrapper[4926]: I1122 10:56:48.862836 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: 
\"kubernetes.io/configmap/29fecf58-02ca-475e-9394-6205a9cdc086-config-data\") pod \"horizon-659f95cfc-dgn4q\" (UID: \"29fecf58-02ca-475e-9394-6205a9cdc086\") " pod="openstack/horizon-659f95cfc-dgn4q" Nov 22 10:56:48 crc kubenswrapper[4926]: I1122 10:56:48.862854 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/9ebf564c-b5b8-40b7-8899-fc953a485d4d-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"9ebf564c-b5b8-40b7-8899-fc953a485d4d\") " pod="openstack/ceilometer-0" Nov 22 10:56:48 crc kubenswrapper[4926]: I1122 10:56:48.862895 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/29fecf58-02ca-475e-9394-6205a9cdc086-scripts\") pod \"horizon-659f95cfc-dgn4q\" (UID: \"29fecf58-02ca-475e-9394-6205a9cdc086\") " pod="openstack/horizon-659f95cfc-dgn4q" Nov 22 10:56:48 crc kubenswrapper[4926]: I1122 10:56:48.862923 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/29fecf58-02ca-475e-9394-6205a9cdc086-logs\") pod \"horizon-659f95cfc-dgn4q\" (UID: \"29fecf58-02ca-475e-9394-6205a9cdc086\") " pod="openstack/horizon-659f95cfc-dgn4q" Nov 22 10:56:48 crc kubenswrapper[4926]: I1122 10:56:48.862952 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9ebf564c-b5b8-40b7-8899-fc953a485d4d-scripts\") pod \"ceilometer-0\" (UID: \"9ebf564c-b5b8-40b7-8899-fc953a485d4d\") " pod="openstack/ceilometer-0" Nov 22 10:56:48 crc kubenswrapper[4926]: I1122 10:56:48.872848 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/29fecf58-02ca-475e-9394-6205a9cdc086-scripts\") pod \"horizon-659f95cfc-dgn4q\" (UID: \"29fecf58-02ca-475e-9394-6205a9cdc086\") " pod="openstack/horizon-659f95cfc-dgn4q" Nov 22 10:56:48 crc kubenswrapper[4926]: I1122 10:56:48.873151 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/29fecf58-02ca-475e-9394-6205a9cdc086-logs\") pod \"horizon-659f95cfc-dgn4q\" (UID: \"29fecf58-02ca-475e-9394-6205a9cdc086\") " pod="openstack/horizon-659f95cfc-dgn4q" Nov 22 10:56:48 crc kubenswrapper[4926]: I1122 10:56:48.877742 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/29fecf58-02ca-475e-9394-6205a9cdc086-config-data\") pod \"horizon-659f95cfc-dgn4q\" (UID: \"29fecf58-02ca-475e-9394-6205a9cdc086\") " pod="openstack/horizon-659f95cfc-dgn4q" Nov 22 10:56:48 crc kubenswrapper[4926]: I1122 10:56:48.889344 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/horizon-5b8ff8d89f-n9mb9"] Nov 22 10:56:48 crc kubenswrapper[4926]: I1122 10:56:48.893200 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-5b8ff8d89f-n9mb9"
Nov 22 10:56:48 crc kubenswrapper[4926]: I1122 10:56:48.893652 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/29fecf58-02ca-475e-9394-6205a9cdc086-horizon-secret-key\") pod \"horizon-659f95cfc-dgn4q\" (UID: \"29fecf58-02ca-475e-9394-6205a9cdc086\") " pod="openstack/horizon-659f95cfc-dgn4q"
Nov 22 10:56:48 crc kubenswrapper[4926]: I1122 10:56:48.897018 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-l2jbl\" (UniqueName: \"kubernetes.io/projected/29fecf58-02ca-475e-9394-6205a9cdc086-kube-api-access-l2jbl\") pod \"horizon-659f95cfc-dgn4q\" (UID: \"29fecf58-02ca-475e-9394-6205a9cdc086\") " pod="openstack/horizon-659f95cfc-dgn4q"
Nov 22 10:56:48 crc kubenswrapper[4926]: I1122 10:56:48.927947 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-5b8ff8d89f-n9mb9"]
Nov 22 10:56:48 crc kubenswrapper[4926]: I1122 10:56:48.944324 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-db-sync-d6xxc"]
Nov 22 10:56:48 crc kubenswrapper[4926]: I1122 10:56:48.945319 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-sync-d6xxc"
Nov 22 10:56:48 crc kubenswrapper[4926]: I1122 10:56:48.961009 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-httpd-config"
Nov 22 10:56:48 crc kubenswrapper[4926]: I1122 10:56:48.961360 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-config"
Nov 22 10:56:48 crc kubenswrapper[4926]: I1122 10:56:48.961649 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-neutron-dockercfg-vns5p"
Nov 22 10:56:48 crc kubenswrapper[4926]: I1122 10:56:48.974714 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-sync-d6xxc"]
Nov 22 10:56:48 crc kubenswrapper[4926]: I1122 10:56:48.985145 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-659f95cfc-dgn4q"
Nov 22 10:56:49 crc kubenswrapper[4926]: I1122 10:56:49.013505 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-x668t\" (UniqueName: \"kubernetes.io/projected/63e63df0-e7ff-46a2-9b1d-60be115851ce-kube-api-access-x668t\") pod \"cinder-db-sync-fh2gk\" (UID: \"63e63df0-e7ff-46a2-9b1d-60be115851ce\") " pod="openstack/cinder-db-sync-fh2gk"
Nov 22 10:56:49 crc kubenswrapper[4926]: I1122 10:56:49.014079 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/63e63df0-e7ff-46a2-9b1d-60be115851ce-db-sync-config-data\") pod \"cinder-db-sync-fh2gk\" (UID: \"63e63df0-e7ff-46a2-9b1d-60be115851ce\") " pod="openstack/cinder-db-sync-fh2gk"
Nov 22 10:56:49 crc kubenswrapper[4926]: I1122 10:56:49.014237 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6xm5s\" (UniqueName: \"kubernetes.io/projected/9ebf564c-b5b8-40b7-8899-fc953a485d4d-kube-api-access-6xm5s\") pod \"ceilometer-0\" (UID: \"9ebf564c-b5b8-40b7-8899-fc953a485d4d\") " pod="openstack/ceilometer-0"
Nov 22 10:56:49 crc kubenswrapper[4926]: I1122 10:56:49.014372 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9ebf564c-b5b8-40b7-8899-fc953a485d4d-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"9ebf564c-b5b8-40b7-8899-fc953a485d4d\") " pod="openstack/ceilometer-0"
Nov 22 10:56:49 crc kubenswrapper[4926]: I1122 10:56:49.014638 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/9ebf564c-b5b8-40b7-8899-fc953a485d4d-run-httpd\") pod \"ceilometer-0\" (UID: \"9ebf564c-b5b8-40b7-8899-fc953a485d4d\") " pod="openstack/ceilometer-0"
Nov 22 10:56:49 crc kubenswrapper[4926]: I1122 10:56:49.014919 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/63e63df0-e7ff-46a2-9b1d-60be115851ce-etc-machine-id\") pod \"cinder-db-sync-fh2gk\" (UID: \"63e63df0-e7ff-46a2-9b1d-60be115851ce\") " pod="openstack/cinder-db-sync-fh2gk"
Nov 22 10:56:49 crc kubenswrapper[4926]: I1122 10:56:49.015065 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9ebf564c-b5b8-40b7-8899-fc953a485d4d-config-data\") pod \"ceilometer-0\" (UID: \"9ebf564c-b5b8-40b7-8899-fc953a485d4d\") " pod="openstack/ceilometer-0"
Nov 22 10:56:49 crc kubenswrapper[4926]: I1122 10:56:49.015234 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/9ebf564c-b5b8-40b7-8899-fc953a485d4d-log-httpd\") pod \"ceilometer-0\" (UID: \"9ebf564c-b5b8-40b7-8899-fc953a485d4d\") " pod="openstack/ceilometer-0"
Nov 22 10:56:49 crc kubenswrapper[4926]: I1122 10:56:49.015430 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/9ebf564c-b5b8-40b7-8899-fc953a485d4d-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"9ebf564c-b5b8-40b7-8899-fc953a485d4d\") " pod="openstack/ceilometer-0"
Nov 22 10:56:49 crc kubenswrapper[4926]: I1122 10:56:49.028067 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/63e63df0-e7ff-46a2-9b1d-60be115851ce-scripts\") pod \"cinder-db-sync-fh2gk\" (UID: \"63e63df0-e7ff-46a2-9b1d-60be115851ce\") " pod="openstack/cinder-db-sync-fh2gk"
Nov 22 10:56:49 crc kubenswrapper[4926]: I1122 10:56:49.028262 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9ebf564c-b5b8-40b7-8899-fc953a485d4d-scripts\") pod \"ceilometer-0\" (UID: \"9ebf564c-b5b8-40b7-8899-fc953a485d4d\") " pod="openstack/ceilometer-0"
Nov 22 10:56:49 crc kubenswrapper[4926]: I1122 10:56:49.028332 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/63e63df0-e7ff-46a2-9b1d-60be115851ce-config-data\") pod \"cinder-db-sync-fh2gk\" (UID: \"63e63df0-e7ff-46a2-9b1d-60be115851ce\") " pod="openstack/cinder-db-sync-fh2gk"
Nov 22 10:56:49 crc kubenswrapper[4926]: I1122 10:56:49.028385 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/63e63df0-e7ff-46a2-9b1d-60be115851ce-combined-ca-bundle\") pod \"cinder-db-sync-fh2gk\" (UID: \"63e63df0-e7ff-46a2-9b1d-60be115851ce\") " pod="openstack/cinder-db-sync-fh2gk"
Nov 22 10:56:49 crc kubenswrapper[4926]: I1122 10:56:49.016582 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/9ebf564c-b5b8-40b7-8899-fc953a485d4d-run-httpd\") pod \"ceilometer-0\" (UID: \"9ebf564c-b5b8-40b7-8899-fc953a485d4d\") " pod="openstack/ceilometer-0"
Nov 22 10:56:49 crc kubenswrapper[4926]: I1122 10:56:49.027056 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/9ebf564c-b5b8-40b7-8899-fc953a485d4d-log-httpd\") pod \"ceilometer-0\" (UID: \"9ebf564c-b5b8-40b7-8899-fc953a485d4d\") " pod="openstack/ceilometer-0"
Nov 22 10:56:49 crc kubenswrapper[4926]: I1122 10:56:49.030965 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/9ebf564c-b5b8-40b7-8899-fc953a485d4d-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"9ebf564c-b5b8-40b7-8899-fc953a485d4d\") " pod="openstack/ceilometer-0"
Nov 22 10:56:49 crc kubenswrapper[4926]: I1122 10:56:49.031329 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9ebf564c-b5b8-40b7-8899-fc953a485d4d-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"9ebf564c-b5b8-40b7-8899-fc953a485d4d\") " pod="openstack/ceilometer-0"
Nov 22 10:56:49 crc kubenswrapper[4926]: I1122 10:56:49.058213 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9ebf564c-b5b8-40b7-8899-fc953a485d4d-config-data\") pod \"ceilometer-0\" (UID: \"9ebf564c-b5b8-40b7-8899-fc953a485d4d\") " pod="openstack/ceilometer-0"
Nov 22 10:56:49 crc kubenswrapper[4926]: I1122 10:56:49.063019 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6c9c9f998c-26q99"]
pod="openstack/ceilometer-0" Nov 22 10:56:49 crc kubenswrapper[4926]: I1122 10:56:49.080313 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6xm5s\" (UniqueName: \"kubernetes.io/projected/9ebf564c-b5b8-40b7-8899-fc953a485d4d-kube-api-access-6xm5s\") pod \"ceilometer-0\" (UID: \"9ebf564c-b5b8-40b7-8899-fc953a485d4d\") " pod="openstack/ceilometer-0" Nov 22 10:56:49 crc kubenswrapper[4926]: I1122 10:56:49.083223 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-db-sync-bw5rr"] Nov 22 10:56:49 crc kubenswrapper[4926]: I1122 10:56:49.084421 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-sync-bw5rr" Nov 22 10:56:49 crc kubenswrapper[4926]: I1122 10:56:49.086903 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-config-data" Nov 22 10:56:49 crc kubenswrapper[4926]: I1122 10:56:49.088212 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-barbican-dockercfg-4ggkk" Nov 22 10:56:49 crc kubenswrapper[4926]: I1122 10:56:49.107235 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-g4n9v" Nov 22 10:56:49 crc kubenswrapper[4926]: I1122 10:56:49.129231 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/63e63df0-e7ff-46a2-9b1d-60be115851ce-etc-machine-id\") pod \"cinder-db-sync-fh2gk\" (UID: \"63e63df0-e7ff-46a2-9b1d-60be115851ce\") " pod="openstack/cinder-db-sync-fh2gk" Nov 22 10:56:49 crc kubenswrapper[4926]: I1122 10:56:49.129268 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3b9d3be5-3a32-4378-aeb5-db92457a390f-logs\") pod \"horizon-5b8ff8d89f-n9mb9\" (UID: \"3b9d3be5-3a32-4378-aeb5-db92457a390f\") " pod="openstack/horizon-5b8ff8d89f-n9mb9" Nov 22 10:56:49 crc kubenswrapper[4926]: I1122 10:56:49.129292 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/3b9d3be5-3a32-4378-aeb5-db92457a390f-horizon-secret-key\") pod \"horizon-5b8ff8d89f-n9mb9\" (UID: \"3b9d3be5-3a32-4378-aeb5-db92457a390f\") " pod="openstack/horizon-5b8ff8d89f-n9mb9" Nov 22 10:56:49 crc kubenswrapper[4926]: I1122 10:56:49.129348 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/05bbddb1-c370-4805-9f91-373535d67f52-db-sync-config-data\") pod \"barbican-db-sync-bw5rr\" (UID: \"05bbddb1-c370-4805-9f91-373535d67f52\") " pod="openstack/barbican-db-sync-bw5rr" Nov 22 10:56:49 crc kubenswrapper[4926]: I1122 10:56:49.129369 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/05bbddb1-c370-4805-9f91-373535d67f52-combined-ca-bundle\") pod \"barbican-db-sync-bw5rr\" (UID: \"05bbddb1-c370-4805-9f91-373535d67f52\") " pod="openstack/barbican-db-sync-bw5rr" Nov 22 10:56:49 crc kubenswrapper[4926]: I1122 10:56:49.129387 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-p6bbn\" (UniqueName: \"kubernetes.io/projected/6fa25590-aff0-48a4-ac01-3671555d7b1a-kube-api-access-p6bbn\") pod \"neutron-db-sync-d6xxc\" (UID: 
\"6fa25590-aff0-48a4-ac01-3671555d7b1a\") " pod="openstack/neutron-db-sync-d6xxc" Nov 22 10:56:49 crc kubenswrapper[4926]: I1122 10:56:49.129442 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xmqvk\" (UniqueName: \"kubernetes.io/projected/05bbddb1-c370-4805-9f91-373535d67f52-kube-api-access-xmqvk\") pod \"barbican-db-sync-bw5rr\" (UID: \"05bbddb1-c370-4805-9f91-373535d67f52\") " pod="openstack/barbican-db-sync-bw5rr" Nov 22 10:56:49 crc kubenswrapper[4926]: I1122 10:56:49.129459 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/63e63df0-e7ff-46a2-9b1d-60be115851ce-scripts\") pod \"cinder-db-sync-fh2gk\" (UID: \"63e63df0-e7ff-46a2-9b1d-60be115851ce\") " pod="openstack/cinder-db-sync-fh2gk" Nov 22 10:56:49 crc kubenswrapper[4926]: I1122 10:56:49.129504 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6fa25590-aff0-48a4-ac01-3671555d7b1a-combined-ca-bundle\") pod \"neutron-db-sync-d6xxc\" (UID: \"6fa25590-aff0-48a4-ac01-3671555d7b1a\") " pod="openstack/neutron-db-sync-d6xxc" Nov 22 10:56:49 crc kubenswrapper[4926]: I1122 10:56:49.129530 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/6fa25590-aff0-48a4-ac01-3671555d7b1a-config\") pod \"neutron-db-sync-d6xxc\" (UID: \"6fa25590-aff0-48a4-ac01-3671555d7b1a\") " pod="openstack/neutron-db-sync-d6xxc" Nov 22 10:56:49 crc kubenswrapper[4926]: I1122 10:56:49.129549 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6m7mn\" (UniqueName: \"kubernetes.io/projected/3b9d3be5-3a32-4378-aeb5-db92457a390f-kube-api-access-6m7mn\") pod \"horizon-5b8ff8d89f-n9mb9\" (UID: \"3b9d3be5-3a32-4378-aeb5-db92457a390f\") " pod="openstack/horizon-5b8ff8d89f-n9mb9" Nov 22 10:56:49 crc kubenswrapper[4926]: I1122 10:56:49.129593 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/3b9d3be5-3a32-4378-aeb5-db92457a390f-config-data\") pod \"horizon-5b8ff8d89f-n9mb9\" (UID: \"3b9d3be5-3a32-4378-aeb5-db92457a390f\") " pod="openstack/horizon-5b8ff8d89f-n9mb9" Nov 22 10:56:49 crc kubenswrapper[4926]: I1122 10:56:49.129631 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/63e63df0-e7ff-46a2-9b1d-60be115851ce-config-data\") pod \"cinder-db-sync-fh2gk\" (UID: \"63e63df0-e7ff-46a2-9b1d-60be115851ce\") " pod="openstack/cinder-db-sync-fh2gk" Nov 22 10:56:49 crc kubenswrapper[4926]: I1122 10:56:49.129775 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/63e63df0-e7ff-46a2-9b1d-60be115851ce-combined-ca-bundle\") pod \"cinder-db-sync-fh2gk\" (UID: \"63e63df0-e7ff-46a2-9b1d-60be115851ce\") " pod="openstack/cinder-db-sync-fh2gk" Nov 22 10:56:49 crc kubenswrapper[4926]: I1122 10:56:49.129798 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-x668t\" (UniqueName: \"kubernetes.io/projected/63e63df0-e7ff-46a2-9b1d-60be115851ce-kube-api-access-x668t\") pod \"cinder-db-sync-fh2gk\" (UID: \"63e63df0-e7ff-46a2-9b1d-60be115851ce\") " 
pod="openstack/cinder-db-sync-fh2gk" Nov 22 10:56:49 crc kubenswrapper[4926]: I1122 10:56:49.129846 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/63e63df0-e7ff-46a2-9b1d-60be115851ce-db-sync-config-data\") pod \"cinder-db-sync-fh2gk\" (UID: \"63e63df0-e7ff-46a2-9b1d-60be115851ce\") " pod="openstack/cinder-db-sync-fh2gk" Nov 22 10:56:49 crc kubenswrapper[4926]: I1122 10:56:49.129991 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/3b9d3be5-3a32-4378-aeb5-db92457a390f-scripts\") pod \"horizon-5b8ff8d89f-n9mb9\" (UID: \"3b9d3be5-3a32-4378-aeb5-db92457a390f\") " pod="openstack/horizon-5b8ff8d89f-n9mb9" Nov 22 10:56:49 crc kubenswrapper[4926]: I1122 10:56:49.130237 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/63e63df0-e7ff-46a2-9b1d-60be115851ce-etc-machine-id\") pod \"cinder-db-sync-fh2gk\" (UID: \"63e63df0-e7ff-46a2-9b1d-60be115851ce\") " pod="openstack/cinder-db-sync-fh2gk" Nov 22 10:56:49 crc kubenswrapper[4926]: I1122 10:56:49.139797 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/63e63df0-e7ff-46a2-9b1d-60be115851ce-scripts\") pod \"cinder-db-sync-fh2gk\" (UID: \"63e63df0-e7ff-46a2-9b1d-60be115851ce\") " pod="openstack/cinder-db-sync-fh2gk" Nov 22 10:56:49 crc kubenswrapper[4926]: I1122 10:56:49.141135 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-sync-bw5rr"] Nov 22 10:56:49 crc kubenswrapper[4926]: I1122 10:56:49.142896 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-external-api-0"] Nov 22 10:56:49 crc kubenswrapper[4926]: I1122 10:56:49.143007 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/63e63df0-e7ff-46a2-9b1d-60be115851ce-db-sync-config-data\") pod \"cinder-db-sync-fh2gk\" (UID: \"63e63df0-e7ff-46a2-9b1d-60be115851ce\") " pod="openstack/cinder-db-sync-fh2gk" Nov 22 10:56:49 crc kubenswrapper[4926]: I1122 10:56:49.145762 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/63e63df0-e7ff-46a2-9b1d-60be115851ce-config-data\") pod \"cinder-db-sync-fh2gk\" (UID: \"63e63df0-e7ff-46a2-9b1d-60be115851ce\") " pod="openstack/cinder-db-sync-fh2gk" Nov 22 10:56:49 crc kubenswrapper[4926]: I1122 10:56:49.153940 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/63e63df0-e7ff-46a2-9b1d-60be115851ce-combined-ca-bundle\") pod \"cinder-db-sync-fh2gk\" (UID: \"63e63df0-e7ff-46a2-9b1d-60be115851ce\") " pod="openstack/cinder-db-sync-fh2gk" Nov 22 10:56:49 crc kubenswrapper[4926]: I1122 10:56:49.161099 4926 util.go:30] "No sandbox for pod can be found. 
Nov 22 10:56:49 crc kubenswrapper[4926]: I1122 10:56:49.161099 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0"
Nov 22 10:56:49 crc kubenswrapper[4926]: I1122 10:56:49.162851 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-x668t\" (UniqueName: \"kubernetes.io/projected/63e63df0-e7ff-46a2-9b1d-60be115851ce-kube-api-access-x668t\") pod \"cinder-db-sync-fh2gk\" (UID: \"63e63df0-e7ff-46a2-9b1d-60be115851ce\") " pod="openstack/cinder-db-sync-fh2gk"
Nov 22 10:56:49 crc kubenswrapper[4926]: I1122 10:56:49.167396 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-glance-dockercfg-c98zk"
Nov 22 10:56:49 crc kubenswrapper[4926]: I1122 10:56:49.167555 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-scripts"
Nov 22 10:56:49 crc kubenswrapper[4926]: I1122 10:56:49.167720 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-external-config-data"
Nov 22 10:56:49 crc kubenswrapper[4926]: I1122 10:56:49.167843 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-public-svc"
Nov 22 10:56:49 crc kubenswrapper[4926]: I1122 10:56:49.184594 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-57c957c4ff-9ff7g"]
Nov 22 10:56:49 crc kubenswrapper[4926]: I1122 10:56:49.186033 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-57c957c4ff-9ff7g"
Nov 22 10:56:49 crc kubenswrapper[4926]: I1122 10:56:49.204415 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"]
Nov 22 10:56:49 crc kubenswrapper[4926]: I1122 10:56:49.241421 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/3b9d3be5-3a32-4378-aeb5-db92457a390f-scripts\") pod \"horizon-5b8ff8d89f-n9mb9\" (UID: \"3b9d3be5-3a32-4378-aeb5-db92457a390f\") " pod="openstack/horizon-5b8ff8d89f-n9mb9"
Nov 22 10:56:49 crc kubenswrapper[4926]: I1122 10:56:49.241489 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3b9d3be5-3a32-4378-aeb5-db92457a390f-logs\") pod \"horizon-5b8ff8d89f-n9mb9\" (UID: \"3b9d3be5-3a32-4378-aeb5-db92457a390f\") " pod="openstack/horizon-5b8ff8d89f-n9mb9"
Nov 22 10:56:49 crc kubenswrapper[4926]: I1122 10:56:49.241513 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/3b9d3be5-3a32-4378-aeb5-db92457a390f-horizon-secret-key\") pod \"horizon-5b8ff8d89f-n9mb9\" (UID: \"3b9d3be5-3a32-4378-aeb5-db92457a390f\") " pod="openstack/horizon-5b8ff8d89f-n9mb9"
Nov 22 10:56:49 crc kubenswrapper[4926]: I1122 10:56:49.241538 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/05bbddb1-c370-4805-9f91-373535d67f52-db-sync-config-data\") pod \"barbican-db-sync-bw5rr\" (UID: \"05bbddb1-c370-4805-9f91-373535d67f52\") " pod="openstack/barbican-db-sync-bw5rr"
Nov 22 10:56:49 crc kubenswrapper[4926]: I1122 10:56:49.241562 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/05bbddb1-c370-4805-9f91-373535d67f52-combined-ca-bundle\") pod \"barbican-db-sync-bw5rr\" (UID: \"05bbddb1-c370-4805-9f91-373535d67f52\") " pod="openstack/barbican-db-sync-bw5rr"
Nov 22 10:56:49 crc kubenswrapper[4926]: I1122 10:56:49.241579 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-p6bbn\" (UniqueName: \"kubernetes.io/projected/6fa25590-aff0-48a4-ac01-3671555d7b1a-kube-api-access-p6bbn\") pod \"neutron-db-sync-d6xxc\" (UID: \"6fa25590-aff0-48a4-ac01-3671555d7b1a\") " pod="openstack/neutron-db-sync-d6xxc"
Nov 22 10:56:49 crc kubenswrapper[4926]: I1122 10:56:49.241636 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xmqvk\" (UniqueName: \"kubernetes.io/projected/05bbddb1-c370-4805-9f91-373535d67f52-kube-api-access-xmqvk\") pod \"barbican-db-sync-bw5rr\" (UID: \"05bbddb1-c370-4805-9f91-373535d67f52\") " pod="openstack/barbican-db-sync-bw5rr"
Nov 22 10:56:49 crc kubenswrapper[4926]: I1122 10:56:49.241669 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6fa25590-aff0-48a4-ac01-3671555d7b1a-combined-ca-bundle\") pod \"neutron-db-sync-d6xxc\" (UID: \"6fa25590-aff0-48a4-ac01-3671555d7b1a\") " pod="openstack/neutron-db-sync-d6xxc"
Nov 22 10:56:49 crc kubenswrapper[4926]: I1122 10:56:49.241706 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/6fa25590-aff0-48a4-ac01-3671555d7b1a-config\") pod \"neutron-db-sync-d6xxc\" (UID: \"6fa25590-aff0-48a4-ac01-3671555d7b1a\") " pod="openstack/neutron-db-sync-d6xxc"
Nov 22 10:56:49 crc kubenswrapper[4926]: I1122 10:56:49.241727 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6m7mn\" (UniqueName: \"kubernetes.io/projected/3b9d3be5-3a32-4378-aeb5-db92457a390f-kube-api-access-6m7mn\") pod \"horizon-5b8ff8d89f-n9mb9\" (UID: \"3b9d3be5-3a32-4378-aeb5-db92457a390f\") " pod="openstack/horizon-5b8ff8d89f-n9mb9"
Nov 22 10:56:49 crc kubenswrapper[4926]: I1122 10:56:49.241754 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/3b9d3be5-3a32-4378-aeb5-db92457a390f-config-data\") pod \"horizon-5b8ff8d89f-n9mb9\" (UID: \"3b9d3be5-3a32-4378-aeb5-db92457a390f\") " pod="openstack/horizon-5b8ff8d89f-n9mb9"
Nov 22 10:56:49 crc kubenswrapper[4926]: I1122 10:56:49.243355 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/3b9d3be5-3a32-4378-aeb5-db92457a390f-config-data\") pod \"horizon-5b8ff8d89f-n9mb9\" (UID: \"3b9d3be5-3a32-4378-aeb5-db92457a390f\") " pod="openstack/horizon-5b8ff8d89f-n9mb9"
Nov 22 10:56:49 crc kubenswrapper[4926]: I1122 10:56:49.243966 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3b9d3be5-3a32-4378-aeb5-db92457a390f-logs\") pod \"horizon-5b8ff8d89f-n9mb9\" (UID: \"3b9d3be5-3a32-4378-aeb5-db92457a390f\") " pod="openstack/horizon-5b8ff8d89f-n9mb9"
Nov 22 10:56:49 crc kubenswrapper[4926]: I1122 10:56:49.245129 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/3b9d3be5-3a32-4378-aeb5-db92457a390f-scripts\") pod \"horizon-5b8ff8d89f-n9mb9\" (UID: \"3b9d3be5-3a32-4378-aeb5-db92457a390f\") " pod="openstack/horizon-5b8ff8d89f-n9mb9"
Nov 22 10:56:49 crc kubenswrapper[4926]: I1122 10:56:49.247938 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-57c957c4ff-9ff7g"]
Nov 22 10:56:49 crc kubenswrapper[4926]: I1122 10:56:49.249706 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/05bbddb1-c370-4805-9f91-373535d67f52-combined-ca-bundle\") pod \"barbican-db-sync-bw5rr\" (UID: \"05bbddb1-c370-4805-9f91-373535d67f52\") " pod="openstack/barbican-db-sync-bw5rr"
Nov 22 10:56:49 crc kubenswrapper[4926]: I1122 10:56:49.250036 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/6fa25590-aff0-48a4-ac01-3671555d7b1a-config\") pod \"neutron-db-sync-d6xxc\" (UID: \"6fa25590-aff0-48a4-ac01-3671555d7b1a\") " pod="openstack/neutron-db-sync-d6xxc"
Nov 22 10:56:49 crc kubenswrapper[4926]: I1122 10:56:49.250082 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6fa25590-aff0-48a4-ac01-3671555d7b1a-combined-ca-bundle\") pod \"neutron-db-sync-d6xxc\" (UID: \"6fa25590-aff0-48a4-ac01-3671555d7b1a\") " pod="openstack/neutron-db-sync-d6xxc"
Nov 22 10:56:49 crc kubenswrapper[4926]: I1122 10:56:49.253117 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/3b9d3be5-3a32-4378-aeb5-db92457a390f-horizon-secret-key\") pod \"horizon-5b8ff8d89f-n9mb9\" (UID: \"3b9d3be5-3a32-4378-aeb5-db92457a390f\") " pod="openstack/horizon-5b8ff8d89f-n9mb9"
Nov 22 10:56:49 crc kubenswrapper[4926]: I1122 10:56:49.256617 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/05bbddb1-c370-4805-9f91-373535d67f52-db-sync-config-data\") pod \"barbican-db-sync-bw5rr\" (UID: \"05bbddb1-c370-4805-9f91-373535d67f52\") " pod="openstack/barbican-db-sync-bw5rr"
Nov 22 10:56:49 crc kubenswrapper[4926]: I1122 10:56:49.268866 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6m7mn\" (UniqueName: \"kubernetes.io/projected/3b9d3be5-3a32-4378-aeb5-db92457a390f-kube-api-access-6m7mn\") pod \"horizon-5b8ff8d89f-n9mb9\" (UID: \"3b9d3be5-3a32-4378-aeb5-db92457a390f\") " pod="openstack/horizon-5b8ff8d89f-n9mb9"
Nov 22 10:56:49 crc kubenswrapper[4926]: I1122 10:56:49.270934 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xmqvk\" (UniqueName: \"kubernetes.io/projected/05bbddb1-c370-4805-9f91-373535d67f52-kube-api-access-xmqvk\") pod \"barbican-db-sync-bw5rr\" (UID: \"05bbddb1-c370-4805-9f91-373535d67f52\") " pod="openstack/barbican-db-sync-bw5rr"
Nov 22 10:56:49 crc kubenswrapper[4926]: I1122 10:56:49.272261 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-p6bbn\" (UniqueName: \"kubernetes.io/projected/6fa25590-aff0-48a4-ac01-3671555d7b1a-kube-api-access-p6bbn\") pod \"neutron-db-sync-d6xxc\" (UID: \"6fa25590-aff0-48a4-ac01-3671555d7b1a\") " pod="openstack/neutron-db-sync-d6xxc"
Nov 22 10:56:49 crc kubenswrapper[4926]: I1122 10:56:49.281936 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-sync-fh2gk"
Nov 22 10:56:49 crc kubenswrapper[4926]: I1122 10:56:49.290656 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-db-sync-5lxk9"]
Nov 22 10:56:49 crc kubenswrapper[4926]: I1122 10:56:49.292396 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-sync-5lxk9"
Nov 22 10:56:49 crc kubenswrapper[4926]: I1122 10:56:49.294788 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-5b8ff8d89f-n9mb9"
Nov 22 10:56:49 crc kubenswrapper[4926]: I1122 10:56:49.295457 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-scripts"
Nov 22 10:56:49 crc kubenswrapper[4926]: I1122 10:56:49.295457 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-config-data"
Nov 22 10:56:49 crc kubenswrapper[4926]: I1122 10:56:49.295613 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-placement-dockercfg-vjw4c"
Nov 22 10:56:49 crc kubenswrapper[4926]: I1122 10:56:49.315650 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-sync-d6xxc"
Nov 22 10:56:49 crc kubenswrapper[4926]: I1122 10:56:49.322014 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-sync-5lxk9"]
Nov 22 10:56:49 crc kubenswrapper[4926]: I1122 10:56:49.345647 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/bee0e7d2-310f-4b01-8f79-83f113613329-logs\") pod \"placement-db-sync-5lxk9\" (UID: \"bee0e7d2-310f-4b01-8f79-83f113613329\") " pod="openstack/placement-db-sync-5lxk9"
Nov 22 10:56:49 crc kubenswrapper[4926]: I1122 10:56:49.345696 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1ea354a7-08ce-41f2-8f72-ba6a0782c5f8-config-data\") pod \"glance-default-external-api-0\" (UID: \"1ea354a7-08ce-41f2-8f72-ba6a0782c5f8\") " pod="openstack/glance-default-external-api-0"
Nov 22 10:56:49 crc kubenswrapper[4926]: I1122 10:56:49.345716 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1ea354a7-08ce-41f2-8f72-ba6a0782c5f8-scripts\") pod \"glance-default-external-api-0\" (UID: \"1ea354a7-08ce-41f2-8f72-ba6a0782c5f8\") " pod="openstack/glance-default-external-api-0"
Nov 22 10:56:49 crc kubenswrapper[4926]: I1122 10:56:49.345804 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/b8d7394c-3470-41de-8f80-dad43dadff31-dns-svc\") pod \"dnsmasq-dns-57c957c4ff-9ff7g\" (UID: \"b8d7394c-3470-41de-8f80-dad43dadff31\") " pod="openstack/dnsmasq-dns-57c957c4ff-9ff7g"
Nov 22 10:56:49 crc kubenswrapper[4926]: I1122 10:56:49.345827 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"glance-default-external-api-0\" (UID: \"1ea354a7-08ce-41f2-8f72-ba6a0782c5f8\") " pod="openstack/glance-default-external-api-0"
Nov 22 10:56:49 crc kubenswrapper[4926]: I1122 10:56:49.345853 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/1ea354a7-08ce-41f2-8f72-ba6a0782c5f8-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"1ea354a7-08ce-41f2-8f72-ba6a0782c5f8\") " pod="openstack/glance-default-external-api-0"
Nov 22 10:56:49 crc kubenswrapper[4926]: I1122 10:56:49.345873 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-v5p6s\" (UniqueName: \"kubernetes.io/projected/1ea354a7-08ce-41f2-8f72-ba6a0782c5f8-kube-api-access-v5p6s\") pod \"glance-default-external-api-0\" (UID: \"1ea354a7-08ce-41f2-8f72-ba6a0782c5f8\") " pod="openstack/glance-default-external-api-0"
Nov 22 10:56:49 crc kubenswrapper[4926]: I1122 10:56:49.345927 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/b8d7394c-3470-41de-8f80-dad43dadff31-ovsdbserver-nb\") pod \"dnsmasq-dns-57c957c4ff-9ff7g\" (UID: \"b8d7394c-3470-41de-8f80-dad43dadff31\") " pod="openstack/dnsmasq-dns-57c957c4ff-9ff7g"
Nov 22 10:56:49 crc kubenswrapper[4926]: I1122 10:56:49.345953 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/b8d7394c-3470-41de-8f80-dad43dadff31-dns-swift-storage-0\") pod \"dnsmasq-dns-57c957c4ff-9ff7g\" (UID: \"b8d7394c-3470-41de-8f80-dad43dadff31\") " pod="openstack/dnsmasq-dns-57c957c4ff-9ff7g"
Nov 22 10:56:49 crc kubenswrapper[4926]: I1122 10:56:49.345975 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1ea354a7-08ce-41f2-8f72-ba6a0782c5f8-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"1ea354a7-08ce-41f2-8f72-ba6a0782c5f8\") " pod="openstack/glance-default-external-api-0"
Nov 22 10:56:49 crc kubenswrapper[4926]: I1122 10:56:49.345996 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/1ea354a7-08ce-41f2-8f72-ba6a0782c5f8-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"1ea354a7-08ce-41f2-8f72-ba6a0782c5f8\") " pod="openstack/glance-default-external-api-0"
Nov 22 10:56:49 crc kubenswrapper[4926]: I1122 10:56:49.346015 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b8d7394c-3470-41de-8f80-dad43dadff31-config\") pod \"dnsmasq-dns-57c957c4ff-9ff7g\" (UID: \"b8d7394c-3470-41de-8f80-dad43dadff31\") " pod="openstack/dnsmasq-dns-57c957c4ff-9ff7g"
Nov 22 10:56:49 crc kubenswrapper[4926]: I1122 10:56:49.346034 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/b8d7394c-3470-41de-8f80-dad43dadff31-ovsdbserver-sb\") pod \"dnsmasq-dns-57c957c4ff-9ff7g\" (UID: \"b8d7394c-3470-41de-8f80-dad43dadff31\") " pod="openstack/dnsmasq-dns-57c957c4ff-9ff7g"
Nov 22 10:56:49 crc kubenswrapper[4926]: I1122 10:56:49.346049 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/bee0e7d2-310f-4b01-8f79-83f113613329-scripts\") pod \"placement-db-sync-5lxk9\" (UID: \"bee0e7d2-310f-4b01-8f79-83f113613329\") " pod="openstack/placement-db-sync-5lxk9"
Nov 22 10:56:49 crc kubenswrapper[4926]: I1122 10:56:49.346070 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gsnkn\" (UniqueName: \"kubernetes.io/projected/b8d7394c-3470-41de-8f80-dad43dadff31-kube-api-access-gsnkn\") pod \"dnsmasq-dns-57c957c4ff-9ff7g\" (UID: \"b8d7394c-3470-41de-8f80-dad43dadff31\") " pod="openstack/dnsmasq-dns-57c957c4ff-9ff7g"
Nov 22 10:56:49 crc kubenswrapper[4926]: I1122 10:56:49.346087 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/1ea354a7-08ce-41f2-8f72-ba6a0782c5f8-logs\") pod \"glance-default-external-api-0\" (UID: \"1ea354a7-08ce-41f2-8f72-ba6a0782c5f8\") " pod="openstack/glance-default-external-api-0"
Nov 22 10:56:49 crc kubenswrapper[4926]: I1122 10:56:49.346108 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bee0e7d2-310f-4b01-8f79-83f113613329-config-data\") pod \"placement-db-sync-5lxk9\" (UID: \"bee0e7d2-310f-4b01-8f79-83f113613329\") " pod="openstack/placement-db-sync-5lxk9"
Nov 22 10:56:49 crc kubenswrapper[4926]: I1122 10:56:49.350391 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Nov 22 10:56:49 crc kubenswrapper[4926]: I1122 10:56:49.419568 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-sync-bw5rr"
Nov 22 10:56:49 crc kubenswrapper[4926]: I1122 10:56:49.425828 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-659f95cfc-dgn4q"]
Nov 22 10:56:49 crc kubenswrapper[4926]: I1122 10:56:49.449847 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b8d7394c-3470-41de-8f80-dad43dadff31-config\") pod \"dnsmasq-dns-57c957c4ff-9ff7g\" (UID: \"b8d7394c-3470-41de-8f80-dad43dadff31\") " pod="openstack/dnsmasq-dns-57c957c4ff-9ff7g"
Nov 22 10:56:49 crc kubenswrapper[4926]: I1122 10:56:49.449903 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/b8d7394c-3470-41de-8f80-dad43dadff31-ovsdbserver-sb\") pod \"dnsmasq-dns-57c957c4ff-9ff7g\" (UID: \"b8d7394c-3470-41de-8f80-dad43dadff31\") " pod="openstack/dnsmasq-dns-57c957c4ff-9ff7g"
Nov 22 10:56:49 crc kubenswrapper[4926]: I1122 10:56:49.449931 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cclgd\" (UniqueName: \"kubernetes.io/projected/bee0e7d2-310f-4b01-8f79-83f113613329-kube-api-access-cclgd\") pod \"placement-db-sync-5lxk9\" (UID: \"bee0e7d2-310f-4b01-8f79-83f113613329\") " pod="openstack/placement-db-sync-5lxk9"
Nov 22 10:56:49 crc kubenswrapper[4926]: I1122 10:56:49.449951 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/bee0e7d2-310f-4b01-8f79-83f113613329-scripts\") pod \"placement-db-sync-5lxk9\" (UID: \"bee0e7d2-310f-4b01-8f79-83f113613329\") " pod="openstack/placement-db-sync-5lxk9"
Nov 22 10:56:49 crc kubenswrapper[4926]: I1122 10:56:49.449977 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gsnkn\" (UniqueName: \"kubernetes.io/projected/b8d7394c-3470-41de-8f80-dad43dadff31-kube-api-access-gsnkn\") pod \"dnsmasq-dns-57c957c4ff-9ff7g\" (UID: \"b8d7394c-3470-41de-8f80-dad43dadff31\") " pod="openstack/dnsmasq-dns-57c957c4ff-9ff7g"
\"1ea354a7-08ce-41f2-8f72-ba6a0782c5f8\") " pod="openstack/glance-default-external-api-0" Nov 22 10:56:49 crc kubenswrapper[4926]: I1122 10:56:49.450017 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bee0e7d2-310f-4b01-8f79-83f113613329-config-data\") pod \"placement-db-sync-5lxk9\" (UID: \"bee0e7d2-310f-4b01-8f79-83f113613329\") " pod="openstack/placement-db-sync-5lxk9" Nov 22 10:56:49 crc kubenswrapper[4926]: I1122 10:56:49.450048 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bee0e7d2-310f-4b01-8f79-83f113613329-combined-ca-bundle\") pod \"placement-db-sync-5lxk9\" (UID: \"bee0e7d2-310f-4b01-8f79-83f113613329\") " pod="openstack/placement-db-sync-5lxk9" Nov 22 10:56:49 crc kubenswrapper[4926]: I1122 10:56:49.450088 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/bee0e7d2-310f-4b01-8f79-83f113613329-logs\") pod \"placement-db-sync-5lxk9\" (UID: \"bee0e7d2-310f-4b01-8f79-83f113613329\") " pod="openstack/placement-db-sync-5lxk9" Nov 22 10:56:49 crc kubenswrapper[4926]: I1122 10:56:49.450109 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1ea354a7-08ce-41f2-8f72-ba6a0782c5f8-config-data\") pod \"glance-default-external-api-0\" (UID: \"1ea354a7-08ce-41f2-8f72-ba6a0782c5f8\") " pod="openstack/glance-default-external-api-0" Nov 22 10:56:49 crc kubenswrapper[4926]: I1122 10:56:49.450126 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1ea354a7-08ce-41f2-8f72-ba6a0782c5f8-scripts\") pod \"glance-default-external-api-0\" (UID: \"1ea354a7-08ce-41f2-8f72-ba6a0782c5f8\") " pod="openstack/glance-default-external-api-0" Nov 22 10:56:49 crc kubenswrapper[4926]: I1122 10:56:49.450163 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/b8d7394c-3470-41de-8f80-dad43dadff31-dns-svc\") pod \"dnsmasq-dns-57c957c4ff-9ff7g\" (UID: \"b8d7394c-3470-41de-8f80-dad43dadff31\") " pod="openstack/dnsmasq-dns-57c957c4ff-9ff7g" Nov 22 10:56:49 crc kubenswrapper[4926]: I1122 10:56:49.450188 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"glance-default-external-api-0\" (UID: \"1ea354a7-08ce-41f2-8f72-ba6a0782c5f8\") " pod="openstack/glance-default-external-api-0" Nov 22 10:56:49 crc kubenswrapper[4926]: I1122 10:56:49.450220 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/1ea354a7-08ce-41f2-8f72-ba6a0782c5f8-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"1ea354a7-08ce-41f2-8f72-ba6a0782c5f8\") " pod="openstack/glance-default-external-api-0" Nov 22 10:56:49 crc kubenswrapper[4926]: I1122 10:56:49.450241 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-v5p6s\" (UniqueName: \"kubernetes.io/projected/1ea354a7-08ce-41f2-8f72-ba6a0782c5f8-kube-api-access-v5p6s\") pod \"glance-default-external-api-0\" (UID: \"1ea354a7-08ce-41f2-8f72-ba6a0782c5f8\") " pod="openstack/glance-default-external-api-0" Nov 22 10:56:49 crc 
kubenswrapper[4926]: I1122 10:56:49.450281 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/b8d7394c-3470-41de-8f80-dad43dadff31-ovsdbserver-nb\") pod \"dnsmasq-dns-57c957c4ff-9ff7g\" (UID: \"b8d7394c-3470-41de-8f80-dad43dadff31\") " pod="openstack/dnsmasq-dns-57c957c4ff-9ff7g" Nov 22 10:56:49 crc kubenswrapper[4926]: I1122 10:56:49.450315 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/b8d7394c-3470-41de-8f80-dad43dadff31-dns-swift-storage-0\") pod \"dnsmasq-dns-57c957c4ff-9ff7g\" (UID: \"b8d7394c-3470-41de-8f80-dad43dadff31\") " pod="openstack/dnsmasq-dns-57c957c4ff-9ff7g" Nov 22 10:56:49 crc kubenswrapper[4926]: I1122 10:56:49.450367 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1ea354a7-08ce-41f2-8f72-ba6a0782c5f8-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"1ea354a7-08ce-41f2-8f72-ba6a0782c5f8\") " pod="openstack/glance-default-external-api-0" Nov 22 10:56:49 crc kubenswrapper[4926]: I1122 10:56:49.450396 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/1ea354a7-08ce-41f2-8f72-ba6a0782c5f8-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"1ea354a7-08ce-41f2-8f72-ba6a0782c5f8\") " pod="openstack/glance-default-external-api-0" Nov 22 10:56:49 crc kubenswrapper[4926]: I1122 10:56:49.450851 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/1ea354a7-08ce-41f2-8f72-ba6a0782c5f8-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"1ea354a7-08ce-41f2-8f72-ba6a0782c5f8\") " pod="openstack/glance-default-external-api-0" Nov 22 10:56:49 crc kubenswrapper[4926]: I1122 10:56:49.451534 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b8d7394c-3470-41de-8f80-dad43dadff31-config\") pod \"dnsmasq-dns-57c957c4ff-9ff7g\" (UID: \"b8d7394c-3470-41de-8f80-dad43dadff31\") " pod="openstack/dnsmasq-dns-57c957c4ff-9ff7g" Nov 22 10:56:49 crc kubenswrapper[4926]: I1122 10:56:49.452493 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/1ea354a7-08ce-41f2-8f72-ba6a0782c5f8-logs\") pod \"glance-default-external-api-0\" (UID: \"1ea354a7-08ce-41f2-8f72-ba6a0782c5f8\") " pod="openstack/glance-default-external-api-0" Nov 22 10:56:49 crc kubenswrapper[4926]: I1122 10:56:49.453101 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/b8d7394c-3470-41de-8f80-dad43dadff31-ovsdbserver-sb\") pod \"dnsmasq-dns-57c957c4ff-9ff7g\" (UID: \"b8d7394c-3470-41de-8f80-dad43dadff31\") " pod="openstack/dnsmasq-dns-57c957c4ff-9ff7g" Nov 22 10:56:49 crc kubenswrapper[4926]: I1122 10:56:49.453871 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/b8d7394c-3470-41de-8f80-dad43dadff31-ovsdbserver-nb\") pod \"dnsmasq-dns-57c957c4ff-9ff7g\" (UID: \"b8d7394c-3470-41de-8f80-dad43dadff31\") " pod="openstack/dnsmasq-dns-57c957c4ff-9ff7g" Nov 22 10:56:49 crc kubenswrapper[4926]: I1122 10:56:49.454005 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" 
(UniqueName: \"kubernetes.io/empty-dir/bee0e7d2-310f-4b01-8f79-83f113613329-logs\") pod \"placement-db-sync-5lxk9\" (UID: \"bee0e7d2-310f-4b01-8f79-83f113613329\") " pod="openstack/placement-db-sync-5lxk9" Nov 22 10:56:49 crc kubenswrapper[4926]: I1122 10:56:49.454543 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/b8d7394c-3470-41de-8f80-dad43dadff31-dns-swift-storage-0\") pod \"dnsmasq-dns-57c957c4ff-9ff7g\" (UID: \"b8d7394c-3470-41de-8f80-dad43dadff31\") " pod="openstack/dnsmasq-dns-57c957c4ff-9ff7g" Nov 22 10:56:49 crc kubenswrapper[4926]: I1122 10:56:49.457599 4926 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"glance-default-external-api-0\" (UID: \"1ea354a7-08ce-41f2-8f72-ba6a0782c5f8\") device mount path \"/mnt/openstack/pv01\"" pod="openstack/glance-default-external-api-0" Nov 22 10:56:49 crc kubenswrapper[4926]: I1122 10:56:49.457948 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/b8d7394c-3470-41de-8f80-dad43dadff31-dns-svc\") pod \"dnsmasq-dns-57c957c4ff-9ff7g\" (UID: \"b8d7394c-3470-41de-8f80-dad43dadff31\") " pod="openstack/dnsmasq-dns-57c957c4ff-9ff7g" Nov 22 10:56:49 crc kubenswrapper[4926]: I1122 10:56:49.465189 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/1ea354a7-08ce-41f2-8f72-ba6a0782c5f8-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"1ea354a7-08ce-41f2-8f72-ba6a0782c5f8\") " pod="openstack/glance-default-external-api-0" Nov 22 10:56:49 crc kubenswrapper[4926]: I1122 10:56:49.467118 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bee0e7d2-310f-4b01-8f79-83f113613329-config-data\") pod \"placement-db-sync-5lxk9\" (UID: \"bee0e7d2-310f-4b01-8f79-83f113613329\") " pod="openstack/placement-db-sync-5lxk9" Nov 22 10:56:49 crc kubenswrapper[4926]: I1122 10:56:49.473006 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1ea354a7-08ce-41f2-8f72-ba6a0782c5f8-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"1ea354a7-08ce-41f2-8f72-ba6a0782c5f8\") " pod="openstack/glance-default-external-api-0" Nov 22 10:56:49 crc kubenswrapper[4926]: I1122 10:56:49.475637 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1ea354a7-08ce-41f2-8f72-ba6a0782c5f8-config-data\") pod \"glance-default-external-api-0\" (UID: \"1ea354a7-08ce-41f2-8f72-ba6a0782c5f8\") " pod="openstack/glance-default-external-api-0" Nov 22 10:56:49 crc kubenswrapper[4926]: I1122 10:56:49.484124 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-v5p6s\" (UniqueName: \"kubernetes.io/projected/1ea354a7-08ce-41f2-8f72-ba6a0782c5f8-kube-api-access-v5p6s\") pod \"glance-default-external-api-0\" (UID: \"1ea354a7-08ce-41f2-8f72-ba6a0782c5f8\") " pod="openstack/glance-default-external-api-0" Nov 22 10:56:49 crc kubenswrapper[4926]: I1122 10:56:49.485772 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6c9c9f998c-26q99"] Nov 22 10:56:49 crc kubenswrapper[4926]: I1122 10:56:49.485825 4926 operation_generator.go:637] "MountVolume.SetUp 
Nov 22 10:56:49 crc kubenswrapper[4926]: I1122 10:56:49.485825 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/bee0e7d2-310f-4b01-8f79-83f113613329-scripts\") pod \"placement-db-sync-5lxk9\" (UID: \"bee0e7d2-310f-4b01-8f79-83f113613329\") " pod="openstack/placement-db-sync-5lxk9"
Nov 22 10:56:49 crc kubenswrapper[4926]: I1122 10:56:49.489386 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1ea354a7-08ce-41f2-8f72-ba6a0782c5f8-scripts\") pod \"glance-default-external-api-0\" (UID: \"1ea354a7-08ce-41f2-8f72-ba6a0782c5f8\") " pod="openstack/glance-default-external-api-0"
Nov 22 10:56:49 crc kubenswrapper[4926]: I1122 10:56:49.495038 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gsnkn\" (UniqueName: \"kubernetes.io/projected/b8d7394c-3470-41de-8f80-dad43dadff31-kube-api-access-gsnkn\") pod \"dnsmasq-dns-57c957c4ff-9ff7g\" (UID: \"b8d7394c-3470-41de-8f80-dad43dadff31\") " pod="openstack/dnsmasq-dns-57c957c4ff-9ff7g"
Nov 22 10:56:49 crc kubenswrapper[4926]: I1122 10:56:49.519523 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-57c957c4ff-9ff7g"
Nov 22 10:56:49 crc kubenswrapper[4926]: I1122 10:56:49.532180 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"glance-default-external-api-0\" (UID: \"1ea354a7-08ce-41f2-8f72-ba6a0782c5f8\") " pod="openstack/glance-default-external-api-0"
Nov 22 10:56:49 crc kubenswrapper[4926]: I1122 10:56:49.551558 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cclgd\" (UniqueName: \"kubernetes.io/projected/bee0e7d2-310f-4b01-8f79-83f113613329-kube-api-access-cclgd\") pod \"placement-db-sync-5lxk9\" (UID: \"bee0e7d2-310f-4b01-8f79-83f113613329\") " pod="openstack/placement-db-sync-5lxk9"
Nov 22 10:56:49 crc kubenswrapper[4926]: I1122 10:56:49.551642 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bee0e7d2-310f-4b01-8f79-83f113613329-combined-ca-bundle\") pod \"placement-db-sync-5lxk9\" (UID: \"bee0e7d2-310f-4b01-8f79-83f113613329\") " pod="openstack/placement-db-sync-5lxk9"
Nov 22 10:56:49 crc kubenswrapper[4926]: I1122 10:56:49.565716 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bee0e7d2-310f-4b01-8f79-83f113613329-combined-ca-bundle\") pod \"placement-db-sync-5lxk9\" (UID: \"bee0e7d2-310f-4b01-8f79-83f113613329\") " pod="openstack/placement-db-sync-5lxk9"
Nov 22 10:56:49 crc kubenswrapper[4926]: I1122 10:56:49.581611 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cclgd\" (UniqueName: \"kubernetes.io/projected/bee0e7d2-310f-4b01-8f79-83f113613329-kube-api-access-cclgd\") pod \"placement-db-sync-5lxk9\" (UID: \"bee0e7d2-310f-4b01-8f79-83f113613329\") " pod="openstack/placement-db-sync-5lxk9"
Nov 22 10:56:49 crc kubenswrapper[4926]: I1122 10:56:49.624530 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-sync-5lxk9"
Nov 22 10:56:49 crc kubenswrapper[4926]: I1122 10:56:49.711861 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-internal-api-0"]
Nov 22 10:56:49 crc kubenswrapper[4926]: I1122 10:56:49.713405 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0"
Nov 22 10:56:49 crc kubenswrapper[4926]: I1122 10:56:49.722755 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-internal-svc"
Nov 22 10:56:49 crc kubenswrapper[4926]: I1122 10:56:49.723112 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-internal-config-data"
Nov 22 10:56:49 crc kubenswrapper[4926]: I1122 10:56:49.807986 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"]
Nov 22 10:56:49 crc kubenswrapper[4926]: I1122 10:56:49.808508 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0"
Nov 22 10:56:49 crc kubenswrapper[4926]: I1122 10:56:49.828297 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-g4n9v"]
Nov 22 10:56:49 crc kubenswrapper[4926]: I1122 10:56:49.863913 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/048c5331-284b-4ab2-aace-dbba96b8bc01-logs\") pod \"glance-default-internal-api-0\" (UID: \"048c5331-284b-4ab2-aace-dbba96b8bc01\") " pod="openstack/glance-default-internal-api-0"
Nov 22 10:56:49 crc kubenswrapper[4926]: I1122 10:56:49.863975 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/048c5331-284b-4ab2-aace-dbba96b8bc01-scripts\") pod \"glance-default-internal-api-0\" (UID: \"048c5331-284b-4ab2-aace-dbba96b8bc01\") " pod="openstack/glance-default-internal-api-0"
Nov 22 10:56:49 crc kubenswrapper[4926]: I1122 10:56:49.864009 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/048c5331-284b-4ab2-aace-dbba96b8bc01-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"048c5331-284b-4ab2-aace-dbba96b8bc01\") " pod="openstack/glance-default-internal-api-0"
Nov 22 10:56:49 crc kubenswrapper[4926]: I1122 10:56:49.864048 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/048c5331-284b-4ab2-aace-dbba96b8bc01-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"048c5331-284b-4ab2-aace-dbba96b8bc01\") " pod="openstack/glance-default-internal-api-0"
Nov 22 10:56:49 crc kubenswrapper[4926]: I1122 10:56:49.864073 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-crkz5\" (UniqueName: \"kubernetes.io/projected/048c5331-284b-4ab2-aace-dbba96b8bc01-kube-api-access-crkz5\") pod \"glance-default-internal-api-0\" (UID: \"048c5331-284b-4ab2-aace-dbba96b8bc01\") " pod="openstack/glance-default-internal-api-0"
Nov 22 10:56:49 crc kubenswrapper[4926]: I1122 10:56:49.864098 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/048c5331-284b-4ab2-aace-dbba96b8bc01-config-data\") pod \"glance-default-internal-api-0\" (UID: \"048c5331-284b-4ab2-aace-dbba96b8bc01\") " pod="openstack/glance-default-internal-api-0"
Nov 22 10:56:49 crc kubenswrapper[4926]: I1122 10:56:49.864134 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"glance-default-internal-api-0\" (UID: \"048c5331-284b-4ab2-aace-dbba96b8bc01\") " pod="openstack/glance-default-internal-api-0"
Nov 22 10:56:49 crc kubenswrapper[4926]: I1122 10:56:49.864176 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/048c5331-284b-4ab2-aace-dbba96b8bc01-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"048c5331-284b-4ab2-aace-dbba96b8bc01\") " pod="openstack/glance-default-internal-api-0"
Nov 22 10:56:49 crc kubenswrapper[4926]: I1122 10:56:49.872468 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-g4n9v" event={"ID":"0561d7cf-82b0-4059-b43f-b6d278d7dfc4","Type":"ContainerStarted","Data":"b3dbee8de412600815c2ae66f10010e59ca33fa55259aae69d3bfb3518f106d0"}
Nov 22 10:56:49 crc kubenswrapper[4926]: I1122 10:56:49.888262 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-659f95cfc-dgn4q" event={"ID":"29fecf58-02ca-475e-9394-6205a9cdc086","Type":"ContainerStarted","Data":"23790c520d6a7468a6d6347f38d102544bf78a459f8313c61e16280d975ff347"}
Nov 22 10:56:49 crc kubenswrapper[4926]: I1122 10:56:49.921280 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6c9c9f998c-26q99" event={"ID":"e6683f47-f70a-4631-a355-802d689e6d96","Type":"ContainerStarted","Data":"79f06f2ad78d3dcf2432f15c8b5ad02d39fddb38e2ef60563fa45b708c469b08"}
Nov 22 10:56:49 crc kubenswrapper[4926]: I1122 10:56:49.966187 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/048c5331-284b-4ab2-aace-dbba96b8bc01-logs\") pod \"glance-default-internal-api-0\" (UID: \"048c5331-284b-4ab2-aace-dbba96b8bc01\") " pod="openstack/glance-default-internal-api-0"
Nov 22 10:56:49 crc kubenswrapper[4926]: I1122 10:56:49.966240 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/048c5331-284b-4ab2-aace-dbba96b8bc01-scripts\") pod \"glance-default-internal-api-0\" (UID: \"048c5331-284b-4ab2-aace-dbba96b8bc01\") " pod="openstack/glance-default-internal-api-0"
Nov 22 10:56:49 crc kubenswrapper[4926]: I1122 10:56:49.966284 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/048c5331-284b-4ab2-aace-dbba96b8bc01-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"048c5331-284b-4ab2-aace-dbba96b8bc01\") " pod="openstack/glance-default-internal-api-0"
Nov 22 10:56:49 crc kubenswrapper[4926]: I1122 10:56:49.966316 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/048c5331-284b-4ab2-aace-dbba96b8bc01-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"048c5331-284b-4ab2-aace-dbba96b8bc01\") " pod="openstack/glance-default-internal-api-0"
Nov 22 10:56:49 crc kubenswrapper[4926]: I1122 10:56:49.966351 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-crkz5\" (UniqueName: \"kubernetes.io/projected/048c5331-284b-4ab2-aace-dbba96b8bc01-kube-api-access-crkz5\") pod \"glance-default-internal-api-0\" (UID: \"048c5331-284b-4ab2-aace-dbba96b8bc01\") " pod="openstack/glance-default-internal-api-0"
Nov 22 10:56:49 crc kubenswrapper[4926]: I1122 10:56:49.966373 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/048c5331-284b-4ab2-aace-dbba96b8bc01-config-data\") pod \"glance-default-internal-api-0\" (UID: \"048c5331-284b-4ab2-aace-dbba96b8bc01\") " pod="openstack/glance-default-internal-api-0"
Nov 22 10:56:49 crc kubenswrapper[4926]: I1122 10:56:49.966405 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"glance-default-internal-api-0\" (UID: \"048c5331-284b-4ab2-aace-dbba96b8bc01\") " pod="openstack/glance-default-internal-api-0"
Nov 22 10:56:49 crc kubenswrapper[4926]: I1122 10:56:49.966503 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/048c5331-284b-4ab2-aace-dbba96b8bc01-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"048c5331-284b-4ab2-aace-dbba96b8bc01\") " pod="openstack/glance-default-internal-api-0"
Nov 22 10:56:49 crc kubenswrapper[4926]: I1122 10:56:49.971935 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/048c5331-284b-4ab2-aace-dbba96b8bc01-logs\") pod \"glance-default-internal-api-0\" (UID: \"048c5331-284b-4ab2-aace-dbba96b8bc01\") " pod="openstack/glance-default-internal-api-0"
Nov 22 10:56:49 crc kubenswrapper[4926]: I1122 10:56:49.976609 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/048c5331-284b-4ab2-aace-dbba96b8bc01-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"048c5331-284b-4ab2-aace-dbba96b8bc01\") " pod="openstack/glance-default-internal-api-0"
Nov 22 10:56:49 crc kubenswrapper[4926]: I1122 10:56:49.978961 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/048c5331-284b-4ab2-aace-dbba96b8bc01-scripts\") pod \"glance-default-internal-api-0\" (UID: \"048c5331-284b-4ab2-aace-dbba96b8bc01\") " pod="openstack/glance-default-internal-api-0"
Nov 22 10:56:49 crc kubenswrapper[4926]: I1122 10:56:49.979267 4926 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"glance-default-internal-api-0\" (UID: \"048c5331-284b-4ab2-aace-dbba96b8bc01\") device mount path \"/mnt/openstack/pv04\"" pod="openstack/glance-default-internal-api-0"
Nov 22 10:56:49 crc kubenswrapper[4926]: I1122 10:56:49.984760 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/048c5331-284b-4ab2-aace-dbba96b8bc01-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"048c5331-284b-4ab2-aace-dbba96b8bc01\") " pod="openstack/glance-default-internal-api-0"
Nov 22 10:56:49 crc kubenswrapper[4926]: I1122 10:56:49.986065 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/048c5331-284b-4ab2-aace-dbba96b8bc01-config-data\") pod \"glance-default-internal-api-0\" (UID: \"048c5331-284b-4ab2-aace-dbba96b8bc01\") " pod="openstack/glance-default-internal-api-0"
Nov 22 10:56:50 crc kubenswrapper[4926]: I1122 10:56:50.013521 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/048c5331-284b-4ab2-aace-dbba96b8bc01-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"048c5331-284b-4ab2-aace-dbba96b8bc01\") " pod="openstack/glance-default-internal-api-0"
Nov 22 10:56:50 crc kubenswrapper[4926]: I1122 10:56:50.021243 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-crkz5\" (UniqueName: \"kubernetes.io/projected/048c5331-284b-4ab2-aace-dbba96b8bc01-kube-api-access-crkz5\") pod \"glance-default-internal-api-0\" (UID: \"048c5331-284b-4ab2-aace-dbba96b8bc01\") " pod="openstack/glance-default-internal-api-0"
Nov 22 10:56:50 crc kubenswrapper[4926]: I1122 10:56:50.058599 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-sync-fh2gk"]
Nov 22 10:56:50 crc kubenswrapper[4926]: I1122 10:56:50.058707 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"glance-default-internal-api-0\" (UID: \"048c5331-284b-4ab2-aace-dbba96b8bc01\") " pod="openstack/glance-default-internal-api-0"
Nov 22 10:56:50 crc kubenswrapper[4926]: I1122 10:56:50.077743 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-5b8ff8d89f-n9mb9"]
Nov 22 10:56:50 crc kubenswrapper[4926]: I1122 10:56:50.261490 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0"
Nov 22 10:56:50 crc kubenswrapper[4926]: I1122 10:56:50.273879 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"]
Nov 22 10:56:50 crc kubenswrapper[4926]: I1122 10:56:50.379415 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-sync-d6xxc"]
Nov 22 10:56:50 crc kubenswrapper[4926]: I1122 10:56:50.386258 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-sync-bw5rr"]
Nov 22 10:56:50 crc kubenswrapper[4926]: I1122 10:56:50.500599 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-57c957c4ff-9ff7g"]
Nov 22 10:56:50 crc kubenswrapper[4926]: I1122 10:56:50.508645 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-sync-5lxk9"]
Nov 22 10:56:50 crc kubenswrapper[4926]: W1122 10:56:50.525063 4926 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb8d7394c_3470_41de_8f80_dad43dadff31.slice/crio-1a13b1b8a2facc9e556989bdeb008d51b515b063691966a8681a0bc1510979b9 WatchSource:0}: Error finding container 1a13b1b8a2facc9e556989bdeb008d51b515b063691966a8681a0bc1510979b9: Status 404 returned error can't find the container with id 1a13b1b8a2facc9e556989bdeb008d51b515b063691966a8681a0bc1510979b9
Nov 22 10:56:50 crc kubenswrapper[4926]: W1122 10:56:50.534035 4926 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podbee0e7d2_310f_4b01_8f79_83f113613329.slice/crio-6f6f9f835334b62faa4e4038a346528b24dfbe944d01cdba1f346c6d2b9be917 WatchSource:0}: Error finding container 6f6f9f835334b62faa4e4038a346528b24dfbe944d01cdba1f346c6d2b9be917: Status 404 returned error can't find the container with id 6f6f9f835334b62faa4e4038a346528b24dfbe944d01cdba1f346c6d2b9be917
Nov 22 10:56:51 crc kubenswrapper[4926]: I1122 10:56:50.903736 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"]
Nov 22 10:56:51 crc kubenswrapper[4926]: W1122 10:56:50.924687 4926 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod1ea354a7_08ce_41f2_8f72_ba6a0782c5f8.slice/crio-0b6b1c0a1b96d6c49bf5912b55e2aa3301c1cc5a0955b36157889a7ec04210d7 WatchSource:0}: Error finding container 0b6b1c0a1b96d6c49bf5912b55e2aa3301c1cc5a0955b36157889a7ec04210d7: Status 404 returned error can't find the container with id 0b6b1c0a1b96d6c49bf5912b55e2aa3301c1cc5a0955b36157889a7ec04210d7
Nov 22 10:56:51 crc kubenswrapper[4926]: I1122 10:56:50.965970 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-5b8ff8d89f-n9mb9" event={"ID":"3b9d3be5-3a32-4378-aeb5-db92457a390f","Type":"ContainerStarted","Data":"f1f38623089c6ddf2dca3492c4e0c0f9b418dbf9332fe83ab1ca8126037ef4b4"}
Nov 22 10:56:51 crc kubenswrapper[4926]: I1122 10:56:50.976091 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-g4n9v" event={"ID":"0561d7cf-82b0-4059-b43f-b6d278d7dfc4","Type":"ContainerStarted","Data":"640e770632dc635f94dd5c8ee7a65feacae8118fabc7a4ad702ba4491add1bc9"}
Nov 22 10:56:51 crc kubenswrapper[4926]: I1122 10:56:50.979768 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-bw5rr" event={"ID":"05bbddb1-c370-4805-9f91-373535d67f52","Type":"ContainerStarted","Data":"09c2dd4fdcf62e99eaa0d6045b0a12d3588636064ff152067280e5f19bf9c7ae"}
Nov 22 10:56:51 crc kubenswrapper[4926]: I1122 10:56:50.981454 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-5lxk9" event={"ID":"bee0e7d2-310f-4b01-8f79-83f113613329","Type":"ContainerStarted","Data":"6f6f9f835334b62faa4e4038a346528b24dfbe944d01cdba1f346c6d2b9be917"}
Nov 22 10:56:51 crc kubenswrapper[4926]: I1122 10:56:50.982977 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"9ebf564c-b5b8-40b7-8899-fc953a485d4d","Type":"ContainerStarted","Data":"0ac43e049d01b0fd78612539f468ef55b38835efd3d53892f584fc65ab0566c4"}
Nov 22 10:56:51 crc kubenswrapper[4926]: I1122 10:56:50.984273 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-fh2gk" event={"ID":"63e63df0-e7ff-46a2-9b1d-60be115851ce","Type":"ContainerStarted","Data":"10d395895f40e7c6c34eeda0399a95fcfaa37add2d74670f7f3cd5c73fc96a17"}
Nov 22 10:56:51 crc kubenswrapper[4926]: I1122 10:56:50.999115 4926 generic.go:334] "Generic (PLEG): container finished" podID="b8d7394c-3470-41de-8f80-dad43dadff31" containerID="7ccfdfa250d4b39504002e66aedc68e1d82b49c23654f6ab308d91c16a34e47f" exitCode=0
Nov 22 10:56:51 crc kubenswrapper[4926]: I1122 10:56:50.999308 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57c957c4ff-9ff7g" event={"ID":"b8d7394c-3470-41de-8f80-dad43dadff31","Type":"ContainerDied","Data":"7ccfdfa250d4b39504002e66aedc68e1d82b49c23654f6ab308d91c16a34e47f"}
Nov 22 10:56:51 crc kubenswrapper[4926]: I1122 10:56:50.999334 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57c957c4ff-9ff7g" event={"ID":"b8d7394c-3470-41de-8f80-dad43dadff31","Type":"ContainerStarted","Data":"1a13b1b8a2facc9e556989bdeb008d51b515b063691966a8681a0bc1510979b9"}
Nov 22 10:56:51 crc kubenswrapper[4926]: I1122 10:56:51.007441 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-d6xxc" event={"ID":"6fa25590-aff0-48a4-ac01-3671555d7b1a","Type":"ContainerStarted","Data":"652341ca9c543b35f54c8c72a1597769830475881f229f288f558046f98cdc1b"}
Nov 22 10:56:51 crc
kubenswrapper[4926]: I1122 10:56:51.007477 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-d6xxc" event={"ID":"6fa25590-aff0-48a4-ac01-3671555d7b1a","Type":"ContainerStarted","Data":"cd06671e38b61d325fc4719c67ed4a6e865e3c3dfe5106ed7f782f212a75828b"} Nov 22 10:56:51 crc kubenswrapper[4926]: I1122 10:56:51.009862 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-bootstrap-g4n9v" podStartSLOduration=3.009847558 podStartE2EDuration="3.009847558s" podCreationTimestamp="2025-11-22 10:56:48 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 10:56:50.999271184 +0000 UTC m=+1031.300876471" watchObservedRunningTime="2025-11-22 10:56:51.009847558 +0000 UTC m=+1031.311452845" Nov 22 10:56:51 crc kubenswrapper[4926]: I1122 10:56:51.011481 4926 generic.go:334] "Generic (PLEG): container finished" podID="e6683f47-f70a-4631-a355-802d689e6d96" containerID="5e8e89b6784036565d07a381f743b7398becfcaf47de74f63966a33331b2ed0c" exitCode=0 Nov 22 10:56:51 crc kubenswrapper[4926]: I1122 10:56:51.011511 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6c9c9f998c-26q99" event={"ID":"e6683f47-f70a-4631-a355-802d689e6d96","Type":"ContainerDied","Data":"5e8e89b6784036565d07a381f743b7398becfcaf47de74f63966a33331b2ed0c"} Nov 22 10:56:51 crc kubenswrapper[4926]: I1122 10:56:51.037225 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 22 10:56:51 crc kubenswrapper[4926]: I1122 10:56:51.108291 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-db-sync-d6xxc" podStartSLOduration=3.108275069 podStartE2EDuration="3.108275069s" podCreationTimestamp="2025-11-22 10:56:48 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 10:56:51.079302436 +0000 UTC m=+1031.380907723" watchObservedRunningTime="2025-11-22 10:56:51.108275069 +0000 UTC m=+1031.409880356" Nov 22 10:56:51 crc kubenswrapper[4926]: I1122 10:56:51.349340 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 22 10:56:51 crc kubenswrapper[4926]: I1122 10:56:51.389454 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-5b8ff8d89f-n9mb9"] Nov 22 10:56:51 crc kubenswrapper[4926]: I1122 10:56:51.423089 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/horizon-6759876769-glz77"] Nov 22 10:56:51 crc kubenswrapper[4926]: I1122 10:56:51.435958 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-6759876769-glz77" Nov 22 10:56:51 crc kubenswrapper[4926]: I1122 10:56:51.489612 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-6759876769-glz77"] Nov 22 10:56:51 crc kubenswrapper[4926]: I1122 10:56:51.500055 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 22 10:56:51 crc kubenswrapper[4926]: I1122 10:56:51.524105 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9dbms\" (UniqueName: \"kubernetes.io/projected/cea79410-ad06-4a0e-9227-20e516784c04-kube-api-access-9dbms\") pod \"horizon-6759876769-glz77\" (UID: \"cea79410-ad06-4a0e-9227-20e516784c04\") " pod="openstack/horizon-6759876769-glz77" Nov 22 10:56:51 crc kubenswrapper[4926]: I1122 10:56:51.524217 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/cea79410-ad06-4a0e-9227-20e516784c04-horizon-secret-key\") pod \"horizon-6759876769-glz77\" (UID: \"cea79410-ad06-4a0e-9227-20e516784c04\") " pod="openstack/horizon-6759876769-glz77" Nov 22 10:56:51 crc kubenswrapper[4926]: I1122 10:56:51.524252 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/cea79410-ad06-4a0e-9227-20e516784c04-config-data\") pod \"horizon-6759876769-glz77\" (UID: \"cea79410-ad06-4a0e-9227-20e516784c04\") " pod="openstack/horizon-6759876769-glz77" Nov 22 10:56:51 crc kubenswrapper[4926]: I1122 10:56:51.524277 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/cea79410-ad06-4a0e-9227-20e516784c04-logs\") pod \"horizon-6759876769-glz77\" (UID: \"cea79410-ad06-4a0e-9227-20e516784c04\") " pod="openstack/horizon-6759876769-glz77" Nov 22 10:56:51 crc kubenswrapper[4926]: I1122 10:56:51.524305 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/cea79410-ad06-4a0e-9227-20e516784c04-scripts\") pod \"horizon-6759876769-glz77\" (UID: \"cea79410-ad06-4a0e-9227-20e516784c04\") " pod="openstack/horizon-6759876769-glz77" Nov 22 10:56:51 crc kubenswrapper[4926]: I1122 10:56:51.625429 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/cea79410-ad06-4a0e-9227-20e516784c04-horizon-secret-key\") pod \"horizon-6759876769-glz77\" (UID: \"cea79410-ad06-4a0e-9227-20e516784c04\") " pod="openstack/horizon-6759876769-glz77" Nov 22 10:56:51 crc kubenswrapper[4926]: I1122 10:56:51.625476 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/cea79410-ad06-4a0e-9227-20e516784c04-config-data\") pod \"horizon-6759876769-glz77\" (UID: \"cea79410-ad06-4a0e-9227-20e516784c04\") " pod="openstack/horizon-6759876769-glz77" Nov 22 10:56:51 crc kubenswrapper[4926]: I1122 10:56:51.625500 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/cea79410-ad06-4a0e-9227-20e516784c04-logs\") pod \"horizon-6759876769-glz77\" (UID: \"cea79410-ad06-4a0e-9227-20e516784c04\") " pod="openstack/horizon-6759876769-glz77" Nov 22 10:56:51 crc kubenswrapper[4926]: I1122 10:56:51.625536 
4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/cea79410-ad06-4a0e-9227-20e516784c04-scripts\") pod \"horizon-6759876769-glz77\" (UID: \"cea79410-ad06-4a0e-9227-20e516784c04\") " pod="openstack/horizon-6759876769-glz77" Nov 22 10:56:51 crc kubenswrapper[4926]: I1122 10:56:51.625579 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9dbms\" (UniqueName: \"kubernetes.io/projected/cea79410-ad06-4a0e-9227-20e516784c04-kube-api-access-9dbms\") pod \"horizon-6759876769-glz77\" (UID: \"cea79410-ad06-4a0e-9227-20e516784c04\") " pod="openstack/horizon-6759876769-glz77" Nov 22 10:56:51 crc kubenswrapper[4926]: I1122 10:56:51.627168 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/cea79410-ad06-4a0e-9227-20e516784c04-logs\") pod \"horizon-6759876769-glz77\" (UID: \"cea79410-ad06-4a0e-9227-20e516784c04\") " pod="openstack/horizon-6759876769-glz77" Nov 22 10:56:51 crc kubenswrapper[4926]: I1122 10:56:51.631658 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/cea79410-ad06-4a0e-9227-20e516784c04-scripts\") pod \"horizon-6759876769-glz77\" (UID: \"cea79410-ad06-4a0e-9227-20e516784c04\") " pod="openstack/horizon-6759876769-glz77" Nov 22 10:56:51 crc kubenswrapper[4926]: I1122 10:56:51.633115 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/cea79410-ad06-4a0e-9227-20e516784c04-config-data\") pod \"horizon-6759876769-glz77\" (UID: \"cea79410-ad06-4a0e-9227-20e516784c04\") " pod="openstack/horizon-6759876769-glz77" Nov 22 10:56:51 crc kubenswrapper[4926]: I1122 10:56:51.648642 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9dbms\" (UniqueName: \"kubernetes.io/projected/cea79410-ad06-4a0e-9227-20e516784c04-kube-api-access-9dbms\") pod \"horizon-6759876769-glz77\" (UID: \"cea79410-ad06-4a0e-9227-20e516784c04\") " pod="openstack/horizon-6759876769-glz77" Nov 22 10:56:51 crc kubenswrapper[4926]: I1122 10:56:51.655307 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/cea79410-ad06-4a0e-9227-20e516784c04-horizon-secret-key\") pod \"horizon-6759876769-glz77\" (UID: \"cea79410-ad06-4a0e-9227-20e516784c04\") " pod="openstack/horizon-6759876769-glz77" Nov 22 10:56:51 crc kubenswrapper[4926]: I1122 10:56:51.774701 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-6759876769-glz77" Nov 22 10:56:51 crc kubenswrapper[4926]: I1122 10:56:51.945726 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 22 10:56:52 crc kubenswrapper[4926]: I1122 10:56:52.106144 4926 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6c9c9f998c-26q99" Nov 22 10:56:52 crc kubenswrapper[4926]: I1122 10:56:52.114190 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"048c5331-284b-4ab2-aace-dbba96b8bc01","Type":"ContainerStarted","Data":"d406f6ca33481c0a1f5a54ed69dcfc47414696b8221fc46957799064e710eaa8"} Nov 22 10:56:52 crc kubenswrapper[4926]: I1122 10:56:52.116351 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6c9c9f998c-26q99" event={"ID":"e6683f47-f70a-4631-a355-802d689e6d96","Type":"ContainerDied","Data":"79f06f2ad78d3dcf2432f15c8b5ad02d39fddb38e2ef60563fa45b708c469b08"} Nov 22 10:56:52 crc kubenswrapper[4926]: I1122 10:56:52.116396 4926 scope.go:117] "RemoveContainer" containerID="5e8e89b6784036565d07a381f743b7398becfcaf47de74f63966a33331b2ed0c" Nov 22 10:56:52 crc kubenswrapper[4926]: I1122 10:56:52.116527 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6c9c9f998c-26q99" Nov 22 10:56:52 crc kubenswrapper[4926]: I1122 10:56:52.142510 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57c957c4ff-9ff7g" event={"ID":"b8d7394c-3470-41de-8f80-dad43dadff31","Type":"ContainerStarted","Data":"ef7fb645788760bfd3a10062b152cbea3873c21b8f43f1d9df41e595f2b29d3c"} Nov 22 10:56:52 crc kubenswrapper[4926]: I1122 10:56:52.143029 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-57c957c4ff-9ff7g" Nov 22 10:56:52 crc kubenswrapper[4926]: I1122 10:56:52.148416 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/e6683f47-f70a-4631-a355-802d689e6d96-ovsdbserver-nb\") pod \"e6683f47-f70a-4631-a355-802d689e6d96\" (UID: \"e6683f47-f70a-4631-a355-802d689e6d96\") " Nov 22 10:56:52 crc kubenswrapper[4926]: I1122 10:56:52.148457 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/e6683f47-f70a-4631-a355-802d689e6d96-ovsdbserver-sb\") pod \"e6683f47-f70a-4631-a355-802d689e6d96\" (UID: \"e6683f47-f70a-4631-a355-802d689e6d96\") " Nov 22 10:56:52 crc kubenswrapper[4926]: I1122 10:56:52.148503 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/e6683f47-f70a-4631-a355-802d689e6d96-dns-swift-storage-0\") pod \"e6683f47-f70a-4631-a355-802d689e6d96\" (UID: \"e6683f47-f70a-4631-a355-802d689e6d96\") " Nov 22 10:56:52 crc kubenswrapper[4926]: I1122 10:56:52.148564 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e6683f47-f70a-4631-a355-802d689e6d96-dns-svc\") pod \"e6683f47-f70a-4631-a355-802d689e6d96\" (UID: \"e6683f47-f70a-4631-a355-802d689e6d96\") " Nov 22 10:56:52 crc kubenswrapper[4926]: I1122 10:56:52.148617 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-c64jc\" (UniqueName: \"kubernetes.io/projected/e6683f47-f70a-4631-a355-802d689e6d96-kube-api-access-c64jc\") pod \"e6683f47-f70a-4631-a355-802d689e6d96\" (UID: \"e6683f47-f70a-4631-a355-802d689e6d96\") " Nov 22 10:56:52 crc kubenswrapper[4926]: I1122 10:56:52.148644 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: 
\"kubernetes.io/configmap/e6683f47-f70a-4631-a355-802d689e6d96-config\") pod \"e6683f47-f70a-4631-a355-802d689e6d96\" (UID: \"e6683f47-f70a-4631-a355-802d689e6d96\") " Nov 22 10:56:52 crc kubenswrapper[4926]: I1122 10:56:52.155002 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"1ea354a7-08ce-41f2-8f72-ba6a0782c5f8","Type":"ContainerStarted","Data":"0b6b1c0a1b96d6c49bf5912b55e2aa3301c1cc5a0955b36157889a7ec04210d7"} Nov 22 10:56:52 crc kubenswrapper[4926]: I1122 10:56:52.162772 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e6683f47-f70a-4631-a355-802d689e6d96-kube-api-access-c64jc" (OuterVolumeSpecName: "kube-api-access-c64jc") pod "e6683f47-f70a-4631-a355-802d689e6d96" (UID: "e6683f47-f70a-4631-a355-802d689e6d96"). InnerVolumeSpecName "kube-api-access-c64jc". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:56:52 crc kubenswrapper[4926]: I1122 10:56:52.176401 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-57c957c4ff-9ff7g" podStartSLOduration=4.176381657 podStartE2EDuration="4.176381657s" podCreationTimestamp="2025-11-22 10:56:48 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 10:56:52.162647062 +0000 UTC m=+1032.464252349" watchObservedRunningTime="2025-11-22 10:56:52.176381657 +0000 UTC m=+1032.477986944" Nov 22 10:56:52 crc kubenswrapper[4926]: I1122 10:56:52.180607 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e6683f47-f70a-4631-a355-802d689e6d96-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "e6683f47-f70a-4631-a355-802d689e6d96" (UID: "e6683f47-f70a-4631-a355-802d689e6d96"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:56:52 crc kubenswrapper[4926]: I1122 10:56:52.192743 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e6683f47-f70a-4631-a355-802d689e6d96-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "e6683f47-f70a-4631-a355-802d689e6d96" (UID: "e6683f47-f70a-4631-a355-802d689e6d96"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:56:52 crc kubenswrapper[4926]: I1122 10:56:52.193129 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e6683f47-f70a-4631-a355-802d689e6d96-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "e6683f47-f70a-4631-a355-802d689e6d96" (UID: "e6683f47-f70a-4631-a355-802d689e6d96"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:56:52 crc kubenswrapper[4926]: I1122 10:56:52.211274 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e6683f47-f70a-4631-a355-802d689e6d96-config" (OuterVolumeSpecName: "config") pod "e6683f47-f70a-4631-a355-802d689e6d96" (UID: "e6683f47-f70a-4631-a355-802d689e6d96"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:56:52 crc kubenswrapper[4926]: I1122 10:56:52.213823 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e6683f47-f70a-4631-a355-802d689e6d96-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "e6683f47-f70a-4631-a355-802d689e6d96" (UID: "e6683f47-f70a-4631-a355-802d689e6d96"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:56:52 crc kubenswrapper[4926]: I1122 10:56:52.250959 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-c64jc\" (UniqueName: \"kubernetes.io/projected/e6683f47-f70a-4631-a355-802d689e6d96-kube-api-access-c64jc\") on node \"crc\" DevicePath \"\"" Nov 22 10:56:52 crc kubenswrapper[4926]: I1122 10:56:52.251000 4926 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e6683f47-f70a-4631-a355-802d689e6d96-config\") on node \"crc\" DevicePath \"\"" Nov 22 10:56:52 crc kubenswrapper[4926]: I1122 10:56:52.251014 4926 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/e6683f47-f70a-4631-a355-802d689e6d96-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 22 10:56:52 crc kubenswrapper[4926]: I1122 10:56:52.251040 4926 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/e6683f47-f70a-4631-a355-802d689e6d96-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 22 10:56:52 crc kubenswrapper[4926]: I1122 10:56:52.251052 4926 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/e6683f47-f70a-4631-a355-802d689e6d96-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Nov 22 10:56:52 crc kubenswrapper[4926]: I1122 10:56:52.251062 4926 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e6683f47-f70a-4631-a355-802d689e6d96-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 22 10:56:52 crc kubenswrapper[4926]: I1122 10:56:52.354697 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-6759876769-glz77"] Nov 22 10:56:52 crc kubenswrapper[4926]: W1122 10:56:52.377751 4926 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podcea79410_ad06_4a0e_9227_20e516784c04.slice/crio-caf92f5a4c37a09e4cf09d080246d42abe5848eb7f8a5f273ddf31e8906d34d5 WatchSource:0}: Error finding container caf92f5a4c37a09e4cf09d080246d42abe5848eb7f8a5f273ddf31e8906d34d5: Status 404 returned error can't find the container with id caf92f5a4c37a09e4cf09d080246d42abe5848eb7f8a5f273ddf31e8906d34d5 Nov 22 10:56:52 crc kubenswrapper[4926]: I1122 10:56:52.487386 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6c9c9f998c-26q99"] Nov 22 10:56:52 crc kubenswrapper[4926]: I1122 10:56:52.491056 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-6c9c9f998c-26q99"] Nov 22 10:56:52 crc kubenswrapper[4926]: I1122 10:56:52.606275 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e6683f47-f70a-4631-a355-802d689e6d96" path="/var/lib/kubelet/pods/e6683f47-f70a-4631-a355-802d689e6d96/volumes" Nov 22 10:56:53 crc kubenswrapper[4926]: I1122 10:56:53.165308 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" 
event={"ID":"1ea354a7-08ce-41f2-8f72-ba6a0782c5f8","Type":"ContainerStarted","Data":"98e2565e5a641e10d3f74128c1ddee908ef25e86c014b0823efa24c0112cf79c"} Nov 22 10:56:53 crc kubenswrapper[4926]: I1122 10:56:53.168570 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"048c5331-284b-4ab2-aace-dbba96b8bc01","Type":"ContainerStarted","Data":"4217f3c73f0b6ea12b31b2c0266858b418fc8ad4827db6b5e792eea4890f571c"} Nov 22 10:56:53 crc kubenswrapper[4926]: I1122 10:56:53.170547 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-6759876769-glz77" event={"ID":"cea79410-ad06-4a0e-9227-20e516784c04","Type":"ContainerStarted","Data":"caf92f5a4c37a09e4cf09d080246d42abe5848eb7f8a5f273ddf31e8906d34d5"} Nov 22 10:56:54 crc kubenswrapper[4926]: I1122 10:56:54.186322 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"048c5331-284b-4ab2-aace-dbba96b8bc01","Type":"ContainerStarted","Data":"49745d297713e283c421b783744db79ab60ec97569cb97791d6c32cb55a69c61"} Nov 22 10:56:54 crc kubenswrapper[4926]: I1122 10:56:54.186439 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="048c5331-284b-4ab2-aace-dbba96b8bc01" containerName="glance-log" containerID="cri-o://4217f3c73f0b6ea12b31b2c0266858b418fc8ad4827db6b5e792eea4890f571c" gracePeriod=30 Nov 22 10:56:54 crc kubenswrapper[4926]: I1122 10:56:54.186508 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="048c5331-284b-4ab2-aace-dbba96b8bc01" containerName="glance-httpd" containerID="cri-o://49745d297713e283c421b783744db79ab60ec97569cb97791d6c32cb55a69c61" gracePeriod=30 Nov 22 10:56:54 crc kubenswrapper[4926]: I1122 10:56:54.189227 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"1ea354a7-08ce-41f2-8f72-ba6a0782c5f8","Type":"ContainerStarted","Data":"2bbf6a03c399448ebf8008a8ecdfb84b2491bf9810dee4ba7b3c78ab7a3c3c38"} Nov 22 10:56:54 crc kubenswrapper[4926]: I1122 10:56:54.190030 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="1ea354a7-08ce-41f2-8f72-ba6a0782c5f8" containerName="glance-log" containerID="cri-o://98e2565e5a641e10d3f74128c1ddee908ef25e86c014b0823efa24c0112cf79c" gracePeriod=30 Nov 22 10:56:54 crc kubenswrapper[4926]: I1122 10:56:54.190162 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="1ea354a7-08ce-41f2-8f72-ba6a0782c5f8" containerName="glance-httpd" containerID="cri-o://2bbf6a03c399448ebf8008a8ecdfb84b2491bf9810dee4ba7b3c78ab7a3c3c38" gracePeriod=30 Nov 22 10:56:54 crc kubenswrapper[4926]: I1122 10:56:54.213491 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-internal-api-0" podStartSLOduration=6.213475003 podStartE2EDuration="6.213475003s" podCreationTimestamp="2025-11-22 10:56:48 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 10:56:54.21267959 +0000 UTC m=+1034.514284887" watchObservedRunningTime="2025-11-22 10:56:54.213475003 +0000 UTC m=+1034.515080290" Nov 22 10:56:54 crc kubenswrapper[4926]: I1122 10:56:54.264628 4926 pod_startup_latency_tracker.go:104] "Observed pod startup 
duration" pod="openstack/glance-default-external-api-0" podStartSLOduration=6.264603413 podStartE2EDuration="6.264603413s" podCreationTimestamp="2025-11-22 10:56:48 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 10:56:54.240649474 +0000 UTC m=+1034.542254761" watchObservedRunningTime="2025-11-22 10:56:54.264603413 +0000 UTC m=+1034.566208700" Nov 22 10:56:55 crc kubenswrapper[4926]: I1122 10:56:55.201257 4926 generic.go:334] "Generic (PLEG): container finished" podID="048c5331-284b-4ab2-aace-dbba96b8bc01" containerID="49745d297713e283c421b783744db79ab60ec97569cb97791d6c32cb55a69c61" exitCode=143 Nov 22 10:56:55 crc kubenswrapper[4926]: I1122 10:56:55.201290 4926 generic.go:334] "Generic (PLEG): container finished" podID="048c5331-284b-4ab2-aace-dbba96b8bc01" containerID="4217f3c73f0b6ea12b31b2c0266858b418fc8ad4827db6b5e792eea4890f571c" exitCode=143 Nov 22 10:56:55 crc kubenswrapper[4926]: I1122 10:56:55.201350 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"048c5331-284b-4ab2-aace-dbba96b8bc01","Type":"ContainerDied","Data":"49745d297713e283c421b783744db79ab60ec97569cb97791d6c32cb55a69c61"} Nov 22 10:56:55 crc kubenswrapper[4926]: I1122 10:56:55.201398 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"048c5331-284b-4ab2-aace-dbba96b8bc01","Type":"ContainerDied","Data":"4217f3c73f0b6ea12b31b2c0266858b418fc8ad4827db6b5e792eea4890f571c"} Nov 22 10:56:55 crc kubenswrapper[4926]: I1122 10:56:55.206680 4926 generic.go:334] "Generic (PLEG): container finished" podID="1ea354a7-08ce-41f2-8f72-ba6a0782c5f8" containerID="2bbf6a03c399448ebf8008a8ecdfb84b2491bf9810dee4ba7b3c78ab7a3c3c38" exitCode=143 Nov 22 10:56:55 crc kubenswrapper[4926]: I1122 10:56:55.206737 4926 generic.go:334] "Generic (PLEG): container finished" podID="1ea354a7-08ce-41f2-8f72-ba6a0782c5f8" containerID="98e2565e5a641e10d3f74128c1ddee908ef25e86c014b0823efa24c0112cf79c" exitCode=143 Nov 22 10:56:55 crc kubenswrapper[4926]: I1122 10:56:55.206692 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"1ea354a7-08ce-41f2-8f72-ba6a0782c5f8","Type":"ContainerDied","Data":"2bbf6a03c399448ebf8008a8ecdfb84b2491bf9810dee4ba7b3c78ab7a3c3c38"} Nov 22 10:56:55 crc kubenswrapper[4926]: I1122 10:56:55.206776 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"1ea354a7-08ce-41f2-8f72-ba6a0782c5f8","Type":"ContainerDied","Data":"98e2565e5a641e10d3f74128c1ddee908ef25e86c014b0823efa24c0112cf79c"} Nov 22 10:56:57 crc kubenswrapper[4926]: I1122 10:56:57.230026 4926 generic.go:334] "Generic (PLEG): container finished" podID="0561d7cf-82b0-4059-b43f-b6d278d7dfc4" containerID="640e770632dc635f94dd5c8ee7a65feacae8118fabc7a4ad702ba4491add1bc9" exitCode=0 Nov 22 10:56:57 crc kubenswrapper[4926]: I1122 10:56:57.230300 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-g4n9v" event={"ID":"0561d7cf-82b0-4059-b43f-b6d278d7dfc4","Type":"ContainerDied","Data":"640e770632dc635f94dd5c8ee7a65feacae8118fabc7a4ad702ba4491add1bc9"} Nov 22 10:56:57 crc kubenswrapper[4926]: I1122 10:56:57.704033 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-659f95cfc-dgn4q"] Nov 22 10:56:57 crc kubenswrapper[4926]: I1122 10:56:57.751853 4926 kubelet.go:2421] "SyncLoop ADD" 
source="api" pods=["openstack/horizon-67f69cf99d-5jsdr"] Nov 22 10:56:57 crc kubenswrapper[4926]: E1122 10:56:57.754158 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e6683f47-f70a-4631-a355-802d689e6d96" containerName="init" Nov 22 10:56:57 crc kubenswrapper[4926]: I1122 10:56:57.754177 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="e6683f47-f70a-4631-a355-802d689e6d96" containerName="init" Nov 22 10:56:57 crc kubenswrapper[4926]: I1122 10:56:57.754354 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="e6683f47-f70a-4631-a355-802d689e6d96" containerName="init" Nov 22 10:56:57 crc kubenswrapper[4926]: I1122 10:56:57.755259 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-67f69cf99d-5jsdr" Nov 22 10:56:57 crc kubenswrapper[4926]: I1122 10:56:57.759477 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-horizon-svc" Nov 22 10:56:57 crc kubenswrapper[4926]: I1122 10:56:57.775346 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-67f69cf99d-5jsdr"] Nov 22 10:56:57 crc kubenswrapper[4926]: I1122 10:56:57.805818 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/0566b619-da0e-49ff-b282-3d2bb8ae4fe6-config-data\") pod \"horizon-67f69cf99d-5jsdr\" (UID: \"0566b619-da0e-49ff-b282-3d2bb8ae4fe6\") " pod="openstack/horizon-67f69cf99d-5jsdr" Nov 22 10:56:57 crc kubenswrapper[4926]: I1122 10:56:57.805947 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0566b619-da0e-49ff-b282-3d2bb8ae4fe6-combined-ca-bundle\") pod \"horizon-67f69cf99d-5jsdr\" (UID: \"0566b619-da0e-49ff-b282-3d2bb8ae4fe6\") " pod="openstack/horizon-67f69cf99d-5jsdr" Nov 22 10:56:57 crc kubenswrapper[4926]: I1122 10:56:57.805984 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0566b619-da0e-49ff-b282-3d2bb8ae4fe6-logs\") pod \"horizon-67f69cf99d-5jsdr\" (UID: \"0566b619-da0e-49ff-b282-3d2bb8ae4fe6\") " pod="openstack/horizon-67f69cf99d-5jsdr" Nov 22 10:56:57 crc kubenswrapper[4926]: I1122 10:56:57.806007 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/0566b619-da0e-49ff-b282-3d2bb8ae4fe6-horizon-secret-key\") pod \"horizon-67f69cf99d-5jsdr\" (UID: \"0566b619-da0e-49ff-b282-3d2bb8ae4fe6\") " pod="openstack/horizon-67f69cf99d-5jsdr" Nov 22 10:56:57 crc kubenswrapper[4926]: I1122 10:56:57.806030 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cpnrt\" (UniqueName: \"kubernetes.io/projected/0566b619-da0e-49ff-b282-3d2bb8ae4fe6-kube-api-access-cpnrt\") pod \"horizon-67f69cf99d-5jsdr\" (UID: \"0566b619-da0e-49ff-b282-3d2bb8ae4fe6\") " pod="openstack/horizon-67f69cf99d-5jsdr" Nov 22 10:56:57 crc kubenswrapper[4926]: I1122 10:56:57.806054 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/0566b619-da0e-49ff-b282-3d2bb8ae4fe6-horizon-tls-certs\") pod \"horizon-67f69cf99d-5jsdr\" (UID: \"0566b619-da0e-49ff-b282-3d2bb8ae4fe6\") " pod="openstack/horizon-67f69cf99d-5jsdr" Nov 22 10:56:57 crc 
kubenswrapper[4926]: I1122 10:56:57.806086 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/0566b619-da0e-49ff-b282-3d2bb8ae4fe6-scripts\") pod \"horizon-67f69cf99d-5jsdr\" (UID: \"0566b619-da0e-49ff-b282-3d2bb8ae4fe6\") " pod="openstack/horizon-67f69cf99d-5jsdr" Nov 22 10:56:57 crc kubenswrapper[4926]: I1122 10:56:57.854312 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-6759876769-glz77"] Nov 22 10:56:57 crc kubenswrapper[4926]: I1122 10:56:57.892064 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/horizon-86dd5d599b-jndzq"] Nov 22 10:56:57 crc kubenswrapper[4926]: I1122 10:56:57.893480 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-86dd5d599b-jndzq" Nov 22 10:56:57 crc kubenswrapper[4926]: I1122 10:56:57.903457 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-86dd5d599b-jndzq"] Nov 22 10:56:57 crc kubenswrapper[4926]: I1122 10:56:57.912760 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/0566b619-da0e-49ff-b282-3d2bb8ae4fe6-scripts\") pod \"horizon-67f69cf99d-5jsdr\" (UID: \"0566b619-da0e-49ff-b282-3d2bb8ae4fe6\") " pod="openstack/horizon-67f69cf99d-5jsdr" Nov 22 10:56:57 crc kubenswrapper[4926]: I1122 10:56:57.912851 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/0566b619-da0e-49ff-b282-3d2bb8ae4fe6-config-data\") pod \"horizon-67f69cf99d-5jsdr\" (UID: \"0566b619-da0e-49ff-b282-3d2bb8ae4fe6\") " pod="openstack/horizon-67f69cf99d-5jsdr" Nov 22 10:56:57 crc kubenswrapper[4926]: I1122 10:56:57.912983 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0566b619-da0e-49ff-b282-3d2bb8ae4fe6-combined-ca-bundle\") pod \"horizon-67f69cf99d-5jsdr\" (UID: \"0566b619-da0e-49ff-b282-3d2bb8ae4fe6\") " pod="openstack/horizon-67f69cf99d-5jsdr" Nov 22 10:56:57 crc kubenswrapper[4926]: I1122 10:56:57.913020 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0566b619-da0e-49ff-b282-3d2bb8ae4fe6-logs\") pod \"horizon-67f69cf99d-5jsdr\" (UID: \"0566b619-da0e-49ff-b282-3d2bb8ae4fe6\") " pod="openstack/horizon-67f69cf99d-5jsdr" Nov 22 10:56:57 crc kubenswrapper[4926]: I1122 10:56:57.913050 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/0566b619-da0e-49ff-b282-3d2bb8ae4fe6-horizon-secret-key\") pod \"horizon-67f69cf99d-5jsdr\" (UID: \"0566b619-da0e-49ff-b282-3d2bb8ae4fe6\") " pod="openstack/horizon-67f69cf99d-5jsdr" Nov 22 10:56:57 crc kubenswrapper[4926]: I1122 10:56:57.913072 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cpnrt\" (UniqueName: \"kubernetes.io/projected/0566b619-da0e-49ff-b282-3d2bb8ae4fe6-kube-api-access-cpnrt\") pod \"horizon-67f69cf99d-5jsdr\" (UID: \"0566b619-da0e-49ff-b282-3d2bb8ae4fe6\") " pod="openstack/horizon-67f69cf99d-5jsdr" Nov 22 10:56:57 crc kubenswrapper[4926]: I1122 10:56:57.913390 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/0566b619-da0e-49ff-b282-3d2bb8ae4fe6-horizon-tls-certs\") 
pod \"horizon-67f69cf99d-5jsdr\" (UID: \"0566b619-da0e-49ff-b282-3d2bb8ae4fe6\") " pod="openstack/horizon-67f69cf99d-5jsdr" Nov 22 10:56:57 crc kubenswrapper[4926]: I1122 10:56:57.913493 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0566b619-da0e-49ff-b282-3d2bb8ae4fe6-logs\") pod \"horizon-67f69cf99d-5jsdr\" (UID: \"0566b619-da0e-49ff-b282-3d2bb8ae4fe6\") " pod="openstack/horizon-67f69cf99d-5jsdr" Nov 22 10:56:57 crc kubenswrapper[4926]: I1122 10:56:57.913506 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/0566b619-da0e-49ff-b282-3d2bb8ae4fe6-scripts\") pod \"horizon-67f69cf99d-5jsdr\" (UID: \"0566b619-da0e-49ff-b282-3d2bb8ae4fe6\") " pod="openstack/horizon-67f69cf99d-5jsdr" Nov 22 10:56:57 crc kubenswrapper[4926]: I1122 10:56:57.914268 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/0566b619-da0e-49ff-b282-3d2bb8ae4fe6-config-data\") pod \"horizon-67f69cf99d-5jsdr\" (UID: \"0566b619-da0e-49ff-b282-3d2bb8ae4fe6\") " pod="openstack/horizon-67f69cf99d-5jsdr" Nov 22 10:56:57 crc kubenswrapper[4926]: I1122 10:56:57.917737 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0566b619-da0e-49ff-b282-3d2bb8ae4fe6-combined-ca-bundle\") pod \"horizon-67f69cf99d-5jsdr\" (UID: \"0566b619-da0e-49ff-b282-3d2bb8ae4fe6\") " pod="openstack/horizon-67f69cf99d-5jsdr" Nov 22 10:56:57 crc kubenswrapper[4926]: I1122 10:56:57.924528 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/0566b619-da0e-49ff-b282-3d2bb8ae4fe6-horizon-secret-key\") pod \"horizon-67f69cf99d-5jsdr\" (UID: \"0566b619-da0e-49ff-b282-3d2bb8ae4fe6\") " pod="openstack/horizon-67f69cf99d-5jsdr" Nov 22 10:56:57 crc kubenswrapper[4926]: I1122 10:56:57.932228 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cpnrt\" (UniqueName: \"kubernetes.io/projected/0566b619-da0e-49ff-b282-3d2bb8ae4fe6-kube-api-access-cpnrt\") pod \"horizon-67f69cf99d-5jsdr\" (UID: \"0566b619-da0e-49ff-b282-3d2bb8ae4fe6\") " pod="openstack/horizon-67f69cf99d-5jsdr" Nov 22 10:56:57 crc kubenswrapper[4926]: I1122 10:56:57.941980 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/0566b619-da0e-49ff-b282-3d2bb8ae4fe6-horizon-tls-certs\") pod \"horizon-67f69cf99d-5jsdr\" (UID: \"0566b619-da0e-49ff-b282-3d2bb8ae4fe6\") " pod="openstack/horizon-67f69cf99d-5jsdr" Nov 22 10:56:58 crc kubenswrapper[4926]: I1122 10:56:58.015154 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/a7c08c13-5c9c-42ac-8fdc-e651c26d97fc-config-data\") pod \"horizon-86dd5d599b-jndzq\" (UID: \"a7c08c13-5c9c-42ac-8fdc-e651c26d97fc\") " pod="openstack/horizon-86dd5d599b-jndzq" Nov 22 10:56:58 crc kubenswrapper[4926]: I1122 10:56:58.015225 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/a7c08c13-5c9c-42ac-8fdc-e651c26d97fc-scripts\") pod \"horizon-86dd5d599b-jndzq\" (UID: \"a7c08c13-5c9c-42ac-8fdc-e651c26d97fc\") " pod="openstack/horizon-86dd5d599b-jndzq" Nov 22 10:56:58 crc kubenswrapper[4926]: I1122 
10:56:58.015363 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qz6bz\" (UniqueName: \"kubernetes.io/projected/a7c08c13-5c9c-42ac-8fdc-e651c26d97fc-kube-api-access-qz6bz\") pod \"horizon-86dd5d599b-jndzq\" (UID: \"a7c08c13-5c9c-42ac-8fdc-e651c26d97fc\") " pod="openstack/horizon-86dd5d599b-jndzq" Nov 22 10:56:58 crc kubenswrapper[4926]: I1122 10:56:58.015416 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a7c08c13-5c9c-42ac-8fdc-e651c26d97fc-logs\") pod \"horizon-86dd5d599b-jndzq\" (UID: \"a7c08c13-5c9c-42ac-8fdc-e651c26d97fc\") " pod="openstack/horizon-86dd5d599b-jndzq" Nov 22 10:56:58 crc kubenswrapper[4926]: I1122 10:56:58.015693 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/a7c08c13-5c9c-42ac-8fdc-e651c26d97fc-horizon-tls-certs\") pod \"horizon-86dd5d599b-jndzq\" (UID: \"a7c08c13-5c9c-42ac-8fdc-e651c26d97fc\") " pod="openstack/horizon-86dd5d599b-jndzq" Nov 22 10:56:58 crc kubenswrapper[4926]: I1122 10:56:58.015723 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a7c08c13-5c9c-42ac-8fdc-e651c26d97fc-combined-ca-bundle\") pod \"horizon-86dd5d599b-jndzq\" (UID: \"a7c08c13-5c9c-42ac-8fdc-e651c26d97fc\") " pod="openstack/horizon-86dd5d599b-jndzq" Nov 22 10:56:58 crc kubenswrapper[4926]: I1122 10:56:58.015759 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/a7c08c13-5c9c-42ac-8fdc-e651c26d97fc-horizon-secret-key\") pod \"horizon-86dd5d599b-jndzq\" (UID: \"a7c08c13-5c9c-42ac-8fdc-e651c26d97fc\") " pod="openstack/horizon-86dd5d599b-jndzq" Nov 22 10:56:58 crc kubenswrapper[4926]: I1122 10:56:58.073645 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-67f69cf99d-5jsdr" Nov 22 10:56:58 crc kubenswrapper[4926]: I1122 10:56:58.117620 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/a7c08c13-5c9c-42ac-8fdc-e651c26d97fc-scripts\") pod \"horizon-86dd5d599b-jndzq\" (UID: \"a7c08c13-5c9c-42ac-8fdc-e651c26d97fc\") " pod="openstack/horizon-86dd5d599b-jndzq" Nov 22 10:56:58 crc kubenswrapper[4926]: I1122 10:56:58.117724 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qz6bz\" (UniqueName: \"kubernetes.io/projected/a7c08c13-5c9c-42ac-8fdc-e651c26d97fc-kube-api-access-qz6bz\") pod \"horizon-86dd5d599b-jndzq\" (UID: \"a7c08c13-5c9c-42ac-8fdc-e651c26d97fc\") " pod="openstack/horizon-86dd5d599b-jndzq" Nov 22 10:56:58 crc kubenswrapper[4926]: I1122 10:56:58.118140 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a7c08c13-5c9c-42ac-8fdc-e651c26d97fc-logs\") pod \"horizon-86dd5d599b-jndzq\" (UID: \"a7c08c13-5c9c-42ac-8fdc-e651c26d97fc\") " pod="openstack/horizon-86dd5d599b-jndzq" Nov 22 10:56:58 crc kubenswrapper[4926]: I1122 10:56:58.118255 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/a7c08c13-5c9c-42ac-8fdc-e651c26d97fc-horizon-tls-certs\") pod \"horizon-86dd5d599b-jndzq\" (UID: \"a7c08c13-5c9c-42ac-8fdc-e651c26d97fc\") " pod="openstack/horizon-86dd5d599b-jndzq" Nov 22 10:56:58 crc kubenswrapper[4926]: I1122 10:56:58.118274 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a7c08c13-5c9c-42ac-8fdc-e651c26d97fc-combined-ca-bundle\") pod \"horizon-86dd5d599b-jndzq\" (UID: \"a7c08c13-5c9c-42ac-8fdc-e651c26d97fc\") " pod="openstack/horizon-86dd5d599b-jndzq" Nov 22 10:56:58 crc kubenswrapper[4926]: I1122 10:56:58.118298 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/a7c08c13-5c9c-42ac-8fdc-e651c26d97fc-horizon-secret-key\") pod \"horizon-86dd5d599b-jndzq\" (UID: \"a7c08c13-5c9c-42ac-8fdc-e651c26d97fc\") " pod="openstack/horizon-86dd5d599b-jndzq" Nov 22 10:56:58 crc kubenswrapper[4926]: I1122 10:56:58.118352 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/a7c08c13-5c9c-42ac-8fdc-e651c26d97fc-config-data\") pod \"horizon-86dd5d599b-jndzq\" (UID: \"a7c08c13-5c9c-42ac-8fdc-e651c26d97fc\") " pod="openstack/horizon-86dd5d599b-jndzq" Nov 22 10:56:58 crc kubenswrapper[4926]: I1122 10:56:58.119201 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/a7c08c13-5c9c-42ac-8fdc-e651c26d97fc-scripts\") pod \"horizon-86dd5d599b-jndzq\" (UID: \"a7c08c13-5c9c-42ac-8fdc-e651c26d97fc\") " pod="openstack/horizon-86dd5d599b-jndzq" Nov 22 10:56:58 crc kubenswrapper[4926]: I1122 10:56:58.119593 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/a7c08c13-5c9c-42ac-8fdc-e651c26d97fc-config-data\") pod \"horizon-86dd5d599b-jndzq\" (UID: \"a7c08c13-5c9c-42ac-8fdc-e651c26d97fc\") " pod="openstack/horizon-86dd5d599b-jndzq" Nov 22 10:56:58 crc kubenswrapper[4926]: I1122 10:56:58.119935 4926 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a7c08c13-5c9c-42ac-8fdc-e651c26d97fc-logs\") pod \"horizon-86dd5d599b-jndzq\" (UID: \"a7c08c13-5c9c-42ac-8fdc-e651c26d97fc\") " pod="openstack/horizon-86dd5d599b-jndzq" Nov 22 10:56:58 crc kubenswrapper[4926]: I1122 10:56:58.123018 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/a7c08c13-5c9c-42ac-8fdc-e651c26d97fc-horizon-secret-key\") pod \"horizon-86dd5d599b-jndzq\" (UID: \"a7c08c13-5c9c-42ac-8fdc-e651c26d97fc\") " pod="openstack/horizon-86dd5d599b-jndzq" Nov 22 10:56:58 crc kubenswrapper[4926]: I1122 10:56:58.123729 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a7c08c13-5c9c-42ac-8fdc-e651c26d97fc-combined-ca-bundle\") pod \"horizon-86dd5d599b-jndzq\" (UID: \"a7c08c13-5c9c-42ac-8fdc-e651c26d97fc\") " pod="openstack/horizon-86dd5d599b-jndzq" Nov 22 10:56:58 crc kubenswrapper[4926]: I1122 10:56:58.134632 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qz6bz\" (UniqueName: \"kubernetes.io/projected/a7c08c13-5c9c-42ac-8fdc-e651c26d97fc-kube-api-access-qz6bz\") pod \"horizon-86dd5d599b-jndzq\" (UID: \"a7c08c13-5c9c-42ac-8fdc-e651c26d97fc\") " pod="openstack/horizon-86dd5d599b-jndzq" Nov 22 10:56:58 crc kubenswrapper[4926]: I1122 10:56:58.134953 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/a7c08c13-5c9c-42ac-8fdc-e651c26d97fc-horizon-tls-certs\") pod \"horizon-86dd5d599b-jndzq\" (UID: \"a7c08c13-5c9c-42ac-8fdc-e651c26d97fc\") " pod="openstack/horizon-86dd5d599b-jndzq" Nov 22 10:56:58 crc kubenswrapper[4926]: I1122 10:56:58.217538 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-86dd5d599b-jndzq" Nov 22 10:56:59 crc kubenswrapper[4926]: I1122 10:56:59.521607 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-57c957c4ff-9ff7g" Nov 22 10:56:59 crc kubenswrapper[4926]: I1122 10:56:59.572506 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-895cf5cf-qk577"] Nov 22 10:56:59 crc kubenswrapper[4926]: I1122 10:56:59.572730 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-895cf5cf-qk577" podUID="db52ce28-2f07-4bdf-8c24-d793e49104d8" containerName="dnsmasq-dns" containerID="cri-o://6113b7fc74d3a90dd151a5da455c660a68b421b636dd0a99fc5c202da42de9bd" gracePeriod=10 Nov 22 10:57:00 crc kubenswrapper[4926]: I1122 10:57:00.275246 4926 generic.go:334] "Generic (PLEG): container finished" podID="db52ce28-2f07-4bdf-8c24-d793e49104d8" containerID="6113b7fc74d3a90dd151a5da455c660a68b421b636dd0a99fc5c202da42de9bd" exitCode=0 Nov 22 10:57:00 crc kubenswrapper[4926]: I1122 10:57:00.275346 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-895cf5cf-qk577" event={"ID":"db52ce28-2f07-4bdf-8c24-d793e49104d8","Type":"ContainerDied","Data":"6113b7fc74d3a90dd151a5da455c660a68b421b636dd0a99fc5c202da42de9bd"} Nov 22 10:57:02 crc kubenswrapper[4926]: I1122 10:57:02.177308 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-895cf5cf-qk577" podUID="db52ce28-2f07-4bdf-8c24-d793e49104d8" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.127:5353: connect: connection refused" Nov 22 10:57:04 crc kubenswrapper[4926]: E1122 10:57:04.957141 4926 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-horizon:current-podified" Nov 22 10:57:04 crc kubenswrapper[4926]: E1122 10:57:04.957676 4926 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:horizon-log,Image:quay.io/podified-antelope-centos9/openstack-horizon:current-podified,Command:[/bin/bash],Args:[-c tail -n+1 -F 
/var/log/horizon/horizon.log],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n649h8fhf6h5h54chb9h67fh64h74hd8h697h5f7h4h658hch5c4h54ch67bhfhf4h9h549hfch9dh55hf8h66dh5cbh5c4hf4h5c6h8bq,ValueFrom:nil,},EnvVar{Name:ENABLE_DESIGNATE,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_HEAT,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_IRONIC,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_MANILA,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_OCTAVIA,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_WATCHER,Value:no,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},EnvVar{Name:UNPACK_THEME,Value:true,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:logs,ReadOnly:false,MountPath:/var/log/horizon,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-9dbms,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*48,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*true,RunAsGroup:*42400,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod horizon-6759876769-glz77_openstack(cea79410-ad06-4a0e-9227-20e516784c04): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 22 10:57:04 crc kubenswrapper[4926]: E1122 10:57:04.960295 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"horizon-log\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\", failed to \"StartContainer\" for \"horizon\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-horizon:current-podified\\\"\"]" pod="openstack/horizon-6759876769-glz77" podUID="cea79410-ad06-4a0e-9227-20e516784c04" Nov 22 10:57:07 crc kubenswrapper[4926]: I1122 10:57:07.177944 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-895cf5cf-qk577" podUID="db52ce28-2f07-4bdf-8c24-d793e49104d8" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.127:5353: connect: connection refused" Nov 22 10:57:08 crc kubenswrapper[4926]: E1122 10:57:08.166411 4926 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-placement-api:current-podified" Nov 22 10:57:08 crc kubenswrapper[4926]: E1122 10:57:08.166870 4926 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:placement-db-sync,Image:quay.io/podified-antelope-centos9/openstack-placement-api:current-podified,Command:[/bin/bash],Args:[-c 
/usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:true,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/usr/local/bin/container-scripts,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:logs,ReadOnly:false,MountPath:/var/log/placement,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:false,MountPath:/var/lib/openstack/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:placement-dbsync-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-cclgd,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42482,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod placement-db-sync-5lxk9_openstack(bee0e7d2-310f-4b01-8f79-83f113613329): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 22 10:57:08 crc kubenswrapper[4926]: E1122 10:57:08.168244 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"placement-db-sync\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/placement-db-sync-5lxk9" podUID="bee0e7d2-310f-4b01-8f79-83f113613329" Nov 22 10:57:08 crc kubenswrapper[4926]: E1122 10:57:08.196210 4926 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-horizon:current-podified" Nov 22 10:57:08 crc kubenswrapper[4926]: E1122 10:57:08.196367 4926 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:horizon-log,Image:quay.io/podified-antelope-centos9/openstack-horizon:current-podified,Command:[/bin/bash],Args:[-c tail -n+1 -F 
/var/log/horizon/horizon.log],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n64ch8ch688h657h5f7hc6h97h57bh586h55bh658h584hcch68dh5d9h694h5bch98h99h69h577h565h5b9hcfh5f5h568h549h5f4h7ch686h656hd5q,ValueFrom:nil,},EnvVar{Name:ENABLE_DESIGNATE,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_HEAT,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_IRONIC,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_MANILA,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_OCTAVIA,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_WATCHER,Value:no,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},EnvVar{Name:UNPACK_THEME,Value:true,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:logs,ReadOnly:false,MountPath:/var/log/horizon,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-l2jbl,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*48,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*true,RunAsGroup:*42400,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod horizon-659f95cfc-dgn4q_openstack(29fecf58-02ca-475e-9394-6205a9cdc086): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 22 10:57:08 crc kubenswrapper[4926]: E1122 10:57:08.199014 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"horizon-log\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\", failed to \"StartContainer\" for \"horizon\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-horizon:current-podified\\\"\"]" pod="openstack/horizon-659f95cfc-dgn4q" podUID="29fecf58-02ca-475e-9394-6205a9cdc086" Nov 22 10:57:08 crc kubenswrapper[4926]: E1122 10:57:08.202767 4926 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-horizon:current-podified" Nov 22 10:57:08 crc kubenswrapper[4926]: E1122 10:57:08.202908 4926 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:horizon-log,Image:quay.io/podified-antelope-centos9/openstack-horizon:current-podified,Command:[/bin/bash],Args:[-c tail -n+1 -F 
/var/log/horizon/horizon.log],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n585h668hfch6ch584hf6h57fhd9h58fh78h646h5ffh645h68hc6h548h546h9bh569h68fh54ch644h5c6h574h584hf5h647h5c4h697h6ch5c5hbcq,ValueFrom:nil,},EnvVar{Name:ENABLE_DESIGNATE,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_HEAT,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_IRONIC,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_MANILA,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_OCTAVIA,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_WATCHER,Value:no,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},EnvVar{Name:UNPACK_THEME,Value:true,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:logs,ReadOnly:false,MountPath:/var/log/horizon,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-6m7mn,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*48,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*true,RunAsGroup:*42400,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod horizon-5b8ff8d89f-n9mb9_openstack(3b9d3be5-3a32-4378-aeb5-db92457a390f): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 22 10:57:08 crc kubenswrapper[4926]: E1122 10:57:08.205520 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"horizon-log\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\", failed to \"StartContainer\" for \"horizon\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-horizon:current-podified\\\"\"]" pod="openstack/horizon-5b8ff8d89f-n9mb9" podUID="3b9d3be5-3a32-4378-aeb5-db92457a390f" Nov 22 10:57:08 crc kubenswrapper[4926]: I1122 10:57:08.277271 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-g4n9v" Nov 22 10:57:08 crc kubenswrapper[4926]: I1122 10:57:08.355775 4926 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bootstrap-g4n9v" Nov 22 10:57:08 crc kubenswrapper[4926]: I1122 10:57:08.355805 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-g4n9v" event={"ID":"0561d7cf-82b0-4059-b43f-b6d278d7dfc4","Type":"ContainerDied","Data":"b3dbee8de412600815c2ae66f10010e59ca33fa55259aae69d3bfb3518f106d0"} Nov 22 10:57:08 crc kubenswrapper[4926]: I1122 10:57:08.355826 4926 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b3dbee8de412600815c2ae66f10010e59ca33fa55259aae69d3bfb3518f106d0" Nov 22 10:57:08 crc kubenswrapper[4926]: E1122 10:57:08.357519 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"placement-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-placement-api:current-podified\\\"\"" pod="openstack/placement-db-sync-5lxk9" podUID="bee0e7d2-310f-4b01-8f79-83f113613329" Nov 22 10:57:08 crc kubenswrapper[4926]: I1122 10:57:08.438874 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0561d7cf-82b0-4059-b43f-b6d278d7dfc4-combined-ca-bundle\") pod \"0561d7cf-82b0-4059-b43f-b6d278d7dfc4\" (UID: \"0561d7cf-82b0-4059-b43f-b6d278d7dfc4\") " Nov 22 10:57:08 crc kubenswrapper[4926]: I1122 10:57:08.438951 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mqtgl\" (UniqueName: \"kubernetes.io/projected/0561d7cf-82b0-4059-b43f-b6d278d7dfc4-kube-api-access-mqtgl\") pod \"0561d7cf-82b0-4059-b43f-b6d278d7dfc4\" (UID: \"0561d7cf-82b0-4059-b43f-b6d278d7dfc4\") " Nov 22 10:57:08 crc kubenswrapper[4926]: I1122 10:57:08.439007 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0561d7cf-82b0-4059-b43f-b6d278d7dfc4-scripts\") pod \"0561d7cf-82b0-4059-b43f-b6d278d7dfc4\" (UID: \"0561d7cf-82b0-4059-b43f-b6d278d7dfc4\") " Nov 22 10:57:08 crc kubenswrapper[4926]: I1122 10:57:08.439035 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0561d7cf-82b0-4059-b43f-b6d278d7dfc4-config-data\") pod \"0561d7cf-82b0-4059-b43f-b6d278d7dfc4\" (UID: \"0561d7cf-82b0-4059-b43f-b6d278d7dfc4\") " Nov 22 10:57:08 crc kubenswrapper[4926]: I1122 10:57:08.439792 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/0561d7cf-82b0-4059-b43f-b6d278d7dfc4-fernet-keys\") pod \"0561d7cf-82b0-4059-b43f-b6d278d7dfc4\" (UID: \"0561d7cf-82b0-4059-b43f-b6d278d7dfc4\") " Nov 22 10:57:08 crc kubenswrapper[4926]: I1122 10:57:08.439869 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/0561d7cf-82b0-4059-b43f-b6d278d7dfc4-credential-keys\") pod \"0561d7cf-82b0-4059-b43f-b6d278d7dfc4\" (UID: \"0561d7cf-82b0-4059-b43f-b6d278d7dfc4\") " Nov 22 10:57:08 crc kubenswrapper[4926]: I1122 10:57:08.448408 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0561d7cf-82b0-4059-b43f-b6d278d7dfc4-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "0561d7cf-82b0-4059-b43f-b6d278d7dfc4" (UID: "0561d7cf-82b0-4059-b43f-b6d278d7dfc4"). InnerVolumeSpecName "fernet-keys". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:57:08 crc kubenswrapper[4926]: I1122 10:57:08.448433 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0561d7cf-82b0-4059-b43f-b6d278d7dfc4-credential-keys" (OuterVolumeSpecName: "credential-keys") pod "0561d7cf-82b0-4059-b43f-b6d278d7dfc4" (UID: "0561d7cf-82b0-4059-b43f-b6d278d7dfc4"). InnerVolumeSpecName "credential-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:57:08 crc kubenswrapper[4926]: I1122 10:57:08.469794 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0561d7cf-82b0-4059-b43f-b6d278d7dfc4-scripts" (OuterVolumeSpecName: "scripts") pod "0561d7cf-82b0-4059-b43f-b6d278d7dfc4" (UID: "0561d7cf-82b0-4059-b43f-b6d278d7dfc4"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:57:08 crc kubenswrapper[4926]: I1122 10:57:08.469801 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0561d7cf-82b0-4059-b43f-b6d278d7dfc4-kube-api-access-mqtgl" (OuterVolumeSpecName: "kube-api-access-mqtgl") pod "0561d7cf-82b0-4059-b43f-b6d278d7dfc4" (UID: "0561d7cf-82b0-4059-b43f-b6d278d7dfc4"). InnerVolumeSpecName "kube-api-access-mqtgl". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:57:08 crc kubenswrapper[4926]: I1122 10:57:08.474965 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0561d7cf-82b0-4059-b43f-b6d278d7dfc4-config-data" (OuterVolumeSpecName: "config-data") pod "0561d7cf-82b0-4059-b43f-b6d278d7dfc4" (UID: "0561d7cf-82b0-4059-b43f-b6d278d7dfc4"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:57:08 crc kubenswrapper[4926]: I1122 10:57:08.480090 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0561d7cf-82b0-4059-b43f-b6d278d7dfc4-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "0561d7cf-82b0-4059-b43f-b6d278d7dfc4" (UID: "0561d7cf-82b0-4059-b43f-b6d278d7dfc4"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:57:08 crc kubenswrapper[4926]: I1122 10:57:08.543320 4926 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/0561d7cf-82b0-4059-b43f-b6d278d7dfc4-fernet-keys\") on node \"crc\" DevicePath \"\"" Nov 22 10:57:08 crc kubenswrapper[4926]: I1122 10:57:08.543356 4926 reconciler_common.go:293] "Volume detached for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/0561d7cf-82b0-4059-b43f-b6d278d7dfc4-credential-keys\") on node \"crc\" DevicePath \"\"" Nov 22 10:57:08 crc kubenswrapper[4926]: I1122 10:57:08.543370 4926 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0561d7cf-82b0-4059-b43f-b6d278d7dfc4-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 22 10:57:08 crc kubenswrapper[4926]: I1122 10:57:08.543383 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mqtgl\" (UniqueName: \"kubernetes.io/projected/0561d7cf-82b0-4059-b43f-b6d278d7dfc4-kube-api-access-mqtgl\") on node \"crc\" DevicePath \"\"" Nov 22 10:57:08 crc kubenswrapper[4926]: I1122 10:57:08.543396 4926 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0561d7cf-82b0-4059-b43f-b6d278d7dfc4-scripts\") on node \"crc\" DevicePath \"\"" Nov 22 10:57:08 crc kubenswrapper[4926]: I1122 10:57:08.543406 4926 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0561d7cf-82b0-4059-b43f-b6d278d7dfc4-config-data\") on node \"crc\" DevicePath \"\"" Nov 22 10:57:09 crc kubenswrapper[4926]: I1122 10:57:09.371933 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-bootstrap-g4n9v"] Nov 22 10:57:09 crc kubenswrapper[4926]: I1122 10:57:09.382074 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-bootstrap-g4n9v"] Nov 22 10:57:09 crc kubenswrapper[4926]: I1122 10:57:09.457967 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-bootstrap-7fq8n"] Nov 22 10:57:09 crc kubenswrapper[4926]: E1122 10:57:09.458442 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0561d7cf-82b0-4059-b43f-b6d278d7dfc4" containerName="keystone-bootstrap" Nov 22 10:57:09 crc kubenswrapper[4926]: I1122 10:57:09.458479 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="0561d7cf-82b0-4059-b43f-b6d278d7dfc4" containerName="keystone-bootstrap" Nov 22 10:57:09 crc kubenswrapper[4926]: I1122 10:57:09.458758 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="0561d7cf-82b0-4059-b43f-b6d278d7dfc4" containerName="keystone-bootstrap" Nov 22 10:57:09 crc kubenswrapper[4926]: I1122 10:57:09.459538 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bootstrap-7fq8n" Nov 22 10:57:09 crc kubenswrapper[4926]: I1122 10:57:09.462601 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Nov 22 10:57:09 crc kubenswrapper[4926]: I1122 10:57:09.462640 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Nov 22 10:57:09 crc kubenswrapper[4926]: I1122 10:57:09.462880 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Nov 22 10:57:09 crc kubenswrapper[4926]: I1122 10:57:09.462944 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-kngh7" Nov 22 10:57:09 crc kubenswrapper[4926]: I1122 10:57:09.463613 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"osp-secret" Nov 22 10:57:09 crc kubenswrapper[4926]: I1122 10:57:09.478801 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-7fq8n"] Nov 22 10:57:09 crc kubenswrapper[4926]: I1122 10:57:09.579782 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/116d9793-8efe-45b6-8a2c-5f4c990346ad-combined-ca-bundle\") pod \"keystone-bootstrap-7fq8n\" (UID: \"116d9793-8efe-45b6-8a2c-5f4c990346ad\") " pod="openstack/keystone-bootstrap-7fq8n" Nov 22 10:57:09 crc kubenswrapper[4926]: I1122 10:57:09.579943 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zr59j\" (UniqueName: \"kubernetes.io/projected/116d9793-8efe-45b6-8a2c-5f4c990346ad-kube-api-access-zr59j\") pod \"keystone-bootstrap-7fq8n\" (UID: \"116d9793-8efe-45b6-8a2c-5f4c990346ad\") " pod="openstack/keystone-bootstrap-7fq8n" Nov 22 10:57:09 crc kubenswrapper[4926]: I1122 10:57:09.580094 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/116d9793-8efe-45b6-8a2c-5f4c990346ad-credential-keys\") pod \"keystone-bootstrap-7fq8n\" (UID: \"116d9793-8efe-45b6-8a2c-5f4c990346ad\") " pod="openstack/keystone-bootstrap-7fq8n" Nov 22 10:57:09 crc kubenswrapper[4926]: I1122 10:57:09.580223 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/116d9793-8efe-45b6-8a2c-5f4c990346ad-scripts\") pod \"keystone-bootstrap-7fq8n\" (UID: \"116d9793-8efe-45b6-8a2c-5f4c990346ad\") " pod="openstack/keystone-bootstrap-7fq8n" Nov 22 10:57:09 crc kubenswrapper[4926]: I1122 10:57:09.580319 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/116d9793-8efe-45b6-8a2c-5f4c990346ad-config-data\") pod \"keystone-bootstrap-7fq8n\" (UID: \"116d9793-8efe-45b6-8a2c-5f4c990346ad\") " pod="openstack/keystone-bootstrap-7fq8n" Nov 22 10:57:09 crc kubenswrapper[4926]: I1122 10:57:09.580448 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/116d9793-8efe-45b6-8a2c-5f4c990346ad-fernet-keys\") pod \"keystone-bootstrap-7fq8n\" (UID: \"116d9793-8efe-45b6-8a2c-5f4c990346ad\") " pod="openstack/keystone-bootstrap-7fq8n" Nov 22 10:57:09 crc kubenswrapper[4926]: I1122 10:57:09.684975 4926 reconciler_common.go:218] "operationExecutor.MountVolume started 
for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/116d9793-8efe-45b6-8a2c-5f4c990346ad-fernet-keys\") pod \"keystone-bootstrap-7fq8n\" (UID: \"116d9793-8efe-45b6-8a2c-5f4c990346ad\") " pod="openstack/keystone-bootstrap-7fq8n" Nov 22 10:57:09 crc kubenswrapper[4926]: I1122 10:57:09.685204 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/116d9793-8efe-45b6-8a2c-5f4c990346ad-combined-ca-bundle\") pod \"keystone-bootstrap-7fq8n\" (UID: \"116d9793-8efe-45b6-8a2c-5f4c990346ad\") " pod="openstack/keystone-bootstrap-7fq8n" Nov 22 10:57:09 crc kubenswrapper[4926]: I1122 10:57:09.685230 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zr59j\" (UniqueName: \"kubernetes.io/projected/116d9793-8efe-45b6-8a2c-5f4c990346ad-kube-api-access-zr59j\") pod \"keystone-bootstrap-7fq8n\" (UID: \"116d9793-8efe-45b6-8a2c-5f4c990346ad\") " pod="openstack/keystone-bootstrap-7fq8n" Nov 22 10:57:09 crc kubenswrapper[4926]: I1122 10:57:09.685301 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/116d9793-8efe-45b6-8a2c-5f4c990346ad-credential-keys\") pod \"keystone-bootstrap-7fq8n\" (UID: \"116d9793-8efe-45b6-8a2c-5f4c990346ad\") " pod="openstack/keystone-bootstrap-7fq8n" Nov 22 10:57:09 crc kubenswrapper[4926]: I1122 10:57:09.685332 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/116d9793-8efe-45b6-8a2c-5f4c990346ad-scripts\") pod \"keystone-bootstrap-7fq8n\" (UID: \"116d9793-8efe-45b6-8a2c-5f4c990346ad\") " pod="openstack/keystone-bootstrap-7fq8n" Nov 22 10:57:09 crc kubenswrapper[4926]: I1122 10:57:09.685390 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/116d9793-8efe-45b6-8a2c-5f4c990346ad-config-data\") pod \"keystone-bootstrap-7fq8n\" (UID: \"116d9793-8efe-45b6-8a2c-5f4c990346ad\") " pod="openstack/keystone-bootstrap-7fq8n" Nov 22 10:57:09 crc kubenswrapper[4926]: I1122 10:57:09.690619 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/116d9793-8efe-45b6-8a2c-5f4c990346ad-fernet-keys\") pod \"keystone-bootstrap-7fq8n\" (UID: \"116d9793-8efe-45b6-8a2c-5f4c990346ad\") " pod="openstack/keystone-bootstrap-7fq8n" Nov 22 10:57:09 crc kubenswrapper[4926]: I1122 10:57:09.698392 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/116d9793-8efe-45b6-8a2c-5f4c990346ad-credential-keys\") pod \"keystone-bootstrap-7fq8n\" (UID: \"116d9793-8efe-45b6-8a2c-5f4c990346ad\") " pod="openstack/keystone-bootstrap-7fq8n" Nov 22 10:57:09 crc kubenswrapper[4926]: I1122 10:57:09.700609 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/116d9793-8efe-45b6-8a2c-5f4c990346ad-scripts\") pod \"keystone-bootstrap-7fq8n\" (UID: \"116d9793-8efe-45b6-8a2c-5f4c990346ad\") " pod="openstack/keystone-bootstrap-7fq8n" Nov 22 10:57:09 crc kubenswrapper[4926]: I1122 10:57:09.700988 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/116d9793-8efe-45b6-8a2c-5f4c990346ad-config-data\") pod \"keystone-bootstrap-7fq8n\" (UID: \"116d9793-8efe-45b6-8a2c-5f4c990346ad\") " 
pod="openstack/keystone-bootstrap-7fq8n" Nov 22 10:57:09 crc kubenswrapper[4926]: I1122 10:57:09.701044 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/116d9793-8efe-45b6-8a2c-5f4c990346ad-combined-ca-bundle\") pod \"keystone-bootstrap-7fq8n\" (UID: \"116d9793-8efe-45b6-8a2c-5f4c990346ad\") " pod="openstack/keystone-bootstrap-7fq8n" Nov 22 10:57:09 crc kubenswrapper[4926]: I1122 10:57:09.703405 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zr59j\" (UniqueName: \"kubernetes.io/projected/116d9793-8efe-45b6-8a2c-5f4c990346ad-kube-api-access-zr59j\") pod \"keystone-bootstrap-7fq8n\" (UID: \"116d9793-8efe-45b6-8a2c-5f4c990346ad\") " pod="openstack/keystone-bootstrap-7fq8n" Nov 22 10:57:09 crc kubenswrapper[4926]: I1122 10:57:09.789329 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-7fq8n" Nov 22 10:57:10 crc kubenswrapper[4926]: I1122 10:57:10.602208 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0561d7cf-82b0-4059-b43f-b6d278d7dfc4" path="/var/lib/kubelet/pods/0561d7cf-82b0-4059-b43f-b6d278d7dfc4/volumes" Nov 22 10:57:15 crc kubenswrapper[4926]: I1122 10:57:15.987417 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-6759876769-glz77" Nov 22 10:57:16 crc kubenswrapper[4926]: I1122 10:57:16.103853 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/cea79410-ad06-4a0e-9227-20e516784c04-horizon-secret-key\") pod \"cea79410-ad06-4a0e-9227-20e516784c04\" (UID: \"cea79410-ad06-4a0e-9227-20e516784c04\") " Nov 22 10:57:16 crc kubenswrapper[4926]: I1122 10:57:16.103970 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/cea79410-ad06-4a0e-9227-20e516784c04-config-data\") pod \"cea79410-ad06-4a0e-9227-20e516784c04\" (UID: \"cea79410-ad06-4a0e-9227-20e516784c04\") " Nov 22 10:57:16 crc kubenswrapper[4926]: I1122 10:57:16.104000 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9dbms\" (UniqueName: \"kubernetes.io/projected/cea79410-ad06-4a0e-9227-20e516784c04-kube-api-access-9dbms\") pod \"cea79410-ad06-4a0e-9227-20e516784c04\" (UID: \"cea79410-ad06-4a0e-9227-20e516784c04\") " Nov 22 10:57:16 crc kubenswrapper[4926]: I1122 10:57:16.104064 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/cea79410-ad06-4a0e-9227-20e516784c04-logs\") pod \"cea79410-ad06-4a0e-9227-20e516784c04\" (UID: \"cea79410-ad06-4a0e-9227-20e516784c04\") " Nov 22 10:57:16 crc kubenswrapper[4926]: I1122 10:57:16.104119 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/cea79410-ad06-4a0e-9227-20e516784c04-scripts\") pod \"cea79410-ad06-4a0e-9227-20e516784c04\" (UID: \"cea79410-ad06-4a0e-9227-20e516784c04\") " Nov 22 10:57:16 crc kubenswrapper[4926]: I1122 10:57:16.105022 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/cea79410-ad06-4a0e-9227-20e516784c04-scripts" (OuterVolumeSpecName: "scripts") pod "cea79410-ad06-4a0e-9227-20e516784c04" (UID: "cea79410-ad06-4a0e-9227-20e516784c04"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:57:16 crc kubenswrapper[4926]: I1122 10:57:16.106077 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/cea79410-ad06-4a0e-9227-20e516784c04-config-data" (OuterVolumeSpecName: "config-data") pod "cea79410-ad06-4a0e-9227-20e516784c04" (UID: "cea79410-ad06-4a0e-9227-20e516784c04"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:57:16 crc kubenswrapper[4926]: I1122 10:57:16.106107 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/cea79410-ad06-4a0e-9227-20e516784c04-logs" (OuterVolumeSpecName: "logs") pod "cea79410-ad06-4a0e-9227-20e516784c04" (UID: "cea79410-ad06-4a0e-9227-20e516784c04"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 10:57:16 crc kubenswrapper[4926]: I1122 10:57:16.127078 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cea79410-ad06-4a0e-9227-20e516784c04-horizon-secret-key" (OuterVolumeSpecName: "horizon-secret-key") pod "cea79410-ad06-4a0e-9227-20e516784c04" (UID: "cea79410-ad06-4a0e-9227-20e516784c04"). InnerVolumeSpecName "horizon-secret-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:57:16 crc kubenswrapper[4926]: I1122 10:57:16.129502 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cea79410-ad06-4a0e-9227-20e516784c04-kube-api-access-9dbms" (OuterVolumeSpecName: "kube-api-access-9dbms") pod "cea79410-ad06-4a0e-9227-20e516784c04" (UID: "cea79410-ad06-4a0e-9227-20e516784c04"). InnerVolumeSpecName "kube-api-access-9dbms". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:57:16 crc kubenswrapper[4926]: I1122 10:57:16.206157 4926 reconciler_common.go:293] "Volume detached for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/cea79410-ad06-4a0e-9227-20e516784c04-horizon-secret-key\") on node \"crc\" DevicePath \"\"" Nov 22 10:57:16 crc kubenswrapper[4926]: I1122 10:57:16.206198 4926 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/cea79410-ad06-4a0e-9227-20e516784c04-config-data\") on node \"crc\" DevicePath \"\"" Nov 22 10:57:16 crc kubenswrapper[4926]: I1122 10:57:16.206211 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9dbms\" (UniqueName: \"kubernetes.io/projected/cea79410-ad06-4a0e-9227-20e516784c04-kube-api-access-9dbms\") on node \"crc\" DevicePath \"\"" Nov 22 10:57:16 crc kubenswrapper[4926]: I1122 10:57:16.206223 4926 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/cea79410-ad06-4a0e-9227-20e516784c04-logs\") on node \"crc\" DevicePath \"\"" Nov 22 10:57:16 crc kubenswrapper[4926]: I1122 10:57:16.206235 4926 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/cea79410-ad06-4a0e-9227-20e516784c04-scripts\") on node \"crc\" DevicePath \"\"" Nov 22 10:57:16 crc kubenswrapper[4926]: I1122 10:57:16.427246 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-6759876769-glz77" event={"ID":"cea79410-ad06-4a0e-9227-20e516784c04","Type":"ContainerDied","Data":"caf92f5a4c37a09e4cf09d080246d42abe5848eb7f8a5f273ddf31e8906d34d5"} Nov 22 10:57:16 crc kubenswrapper[4926]: I1122 10:57:16.427292 4926 util.go:48] "No ready sandbox for pod can be 
found. Need to start a new one" pod="openstack/horizon-6759876769-glz77" Nov 22 10:57:16 crc kubenswrapper[4926]: E1122 10:57:16.429012 4926 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-barbican-api:current-podified" Nov 22 10:57:16 crc kubenswrapper[4926]: E1122 10:57:16.429170 4926 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:barbican-db-sync,Image:quay.io/podified-antelope-centos9/openstack-barbican-api:current-podified,Command:[/bin/bash],Args:[-c barbican-manage db upgrade],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:TRUE,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:db-sync-config-data,ReadOnly:true,MountPath:/etc/barbican/barbican.conf.d,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-xmqvk,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42403,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:*42403,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod barbican-db-sync-bw5rr_openstack(05bbddb1-c370-4805-9f91-373535d67f52): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 22 10:57:16 crc kubenswrapper[4926]: E1122 10:57:16.430370 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"barbican-db-sync\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/barbican-db-sync-bw5rr" podUID="05bbddb1-c370-4805-9f91-373535d67f52" Nov 22 10:57:16 crc kubenswrapper[4926]: I1122 10:57:16.582908 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 22 10:57:16 crc kubenswrapper[4926]: I1122 10:57:16.589968 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-5b8ff8d89f-n9mb9" Nov 22 10:57:16 crc kubenswrapper[4926]: I1122 10:57:16.609923 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-6759876769-glz77"] Nov 22 10:57:16 crc kubenswrapper[4926]: I1122 10:57:16.611183 4926 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-895cf5cf-qk577" Nov 22 10:57:16 crc kubenswrapper[4926]: I1122 10:57:16.614243 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/horizon-6759876769-glz77"] Nov 22 10:57:16 crc kubenswrapper[4926]: I1122 10:57:16.628372 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 22 10:57:16 crc kubenswrapper[4926]: I1122 10:57:16.726837 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1ea354a7-08ce-41f2-8f72-ba6a0782c5f8-scripts\") pod \"1ea354a7-08ce-41f2-8f72-ba6a0782c5f8\" (UID: \"1ea354a7-08ce-41f2-8f72-ba6a0782c5f8\") " Nov 22 10:57:16 crc kubenswrapper[4926]: I1122 10:57:16.726912 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/db52ce28-2f07-4bdf-8c24-d793e49104d8-config\") pod \"db52ce28-2f07-4bdf-8c24-d793e49104d8\" (UID: \"db52ce28-2f07-4bdf-8c24-d793e49104d8\") " Nov 22 10:57:16 crc kubenswrapper[4926]: I1122 10:57:16.726947 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3b9d3be5-3a32-4378-aeb5-db92457a390f-logs\") pod \"3b9d3be5-3a32-4378-aeb5-db92457a390f\" (UID: \"3b9d3be5-3a32-4378-aeb5-db92457a390f\") " Nov 22 10:57:16 crc kubenswrapper[4926]: I1122 10:57:16.726973 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/3b9d3be5-3a32-4378-aeb5-db92457a390f-config-data\") pod \"3b9d3be5-3a32-4378-aeb5-db92457a390f\" (UID: \"3b9d3be5-3a32-4378-aeb5-db92457a390f\") " Nov 22 10:57:16 crc kubenswrapper[4926]: I1122 10:57:16.727012 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ssvdc\" (UniqueName: \"kubernetes.io/projected/db52ce28-2f07-4bdf-8c24-d793e49104d8-kube-api-access-ssvdc\") pod \"db52ce28-2f07-4bdf-8c24-d793e49104d8\" (UID: \"db52ce28-2f07-4bdf-8c24-d793e49104d8\") " Nov 22 10:57:16 crc kubenswrapper[4926]: I1122 10:57:16.727045 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/1ea354a7-08ce-41f2-8f72-ba6a0782c5f8-public-tls-certs\") pod \"1ea354a7-08ce-41f2-8f72-ba6a0782c5f8\" (UID: \"1ea354a7-08ce-41f2-8f72-ba6a0782c5f8\") " Nov 22 10:57:16 crc kubenswrapper[4926]: I1122 10:57:16.727071 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1ea354a7-08ce-41f2-8f72-ba6a0782c5f8-config-data\") pod \"1ea354a7-08ce-41f2-8f72-ba6a0782c5f8\" (UID: \"1ea354a7-08ce-41f2-8f72-ba6a0782c5f8\") " Nov 22 10:57:16 crc kubenswrapper[4926]: I1122 10:57:16.727091 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/db52ce28-2f07-4bdf-8c24-d793e49104d8-dns-swift-storage-0\") pod \"db52ce28-2f07-4bdf-8c24-d793e49104d8\" (UID: \"db52ce28-2f07-4bdf-8c24-d793e49104d8\") " Nov 22 10:57:16 crc kubenswrapper[4926]: I1122 10:57:16.727126 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/048c5331-284b-4ab2-aace-dbba96b8bc01-combined-ca-bundle\") pod \"048c5331-284b-4ab2-aace-dbba96b8bc01\" (UID: 
\"048c5331-284b-4ab2-aace-dbba96b8bc01\") " Nov 22 10:57:16 crc kubenswrapper[4926]: I1122 10:57:16.727186 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/048c5331-284b-4ab2-aace-dbba96b8bc01-config-data\") pod \"048c5331-284b-4ab2-aace-dbba96b8bc01\" (UID: \"048c5331-284b-4ab2-aace-dbba96b8bc01\") " Nov 22 10:57:16 crc kubenswrapper[4926]: I1122 10:57:16.727218 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/1ea354a7-08ce-41f2-8f72-ba6a0782c5f8-httpd-run\") pod \"1ea354a7-08ce-41f2-8f72-ba6a0782c5f8\" (UID: \"1ea354a7-08ce-41f2-8f72-ba6a0782c5f8\") " Nov 22 10:57:16 crc kubenswrapper[4926]: I1122 10:57:16.727254 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/db52ce28-2f07-4bdf-8c24-d793e49104d8-ovsdbserver-sb\") pod \"db52ce28-2f07-4bdf-8c24-d793e49104d8\" (UID: \"db52ce28-2f07-4bdf-8c24-d793e49104d8\") " Nov 22 10:57:16 crc kubenswrapper[4926]: I1122 10:57:16.727285 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/048c5331-284b-4ab2-aace-dbba96b8bc01-internal-tls-certs\") pod \"048c5331-284b-4ab2-aace-dbba96b8bc01\" (UID: \"048c5331-284b-4ab2-aace-dbba96b8bc01\") " Nov 22 10:57:16 crc kubenswrapper[4926]: I1122 10:57:16.727310 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1ea354a7-08ce-41f2-8f72-ba6a0782c5f8-combined-ca-bundle\") pod \"1ea354a7-08ce-41f2-8f72-ba6a0782c5f8\" (UID: \"1ea354a7-08ce-41f2-8f72-ba6a0782c5f8\") " Nov 22 10:57:16 crc kubenswrapper[4926]: I1122 10:57:16.727334 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"1ea354a7-08ce-41f2-8f72-ba6a0782c5f8\" (UID: \"1ea354a7-08ce-41f2-8f72-ba6a0782c5f8\") " Nov 22 10:57:16 crc kubenswrapper[4926]: I1122 10:57:16.727373 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/048c5331-284b-4ab2-aace-dbba96b8bc01-logs\") pod \"048c5331-284b-4ab2-aace-dbba96b8bc01\" (UID: \"048c5331-284b-4ab2-aace-dbba96b8bc01\") " Nov 22 10:57:16 crc kubenswrapper[4926]: I1122 10:57:16.727411 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/3b9d3be5-3a32-4378-aeb5-db92457a390f-scripts\") pod \"3b9d3be5-3a32-4378-aeb5-db92457a390f\" (UID: \"3b9d3be5-3a32-4378-aeb5-db92457a390f\") " Nov 22 10:57:16 crc kubenswrapper[4926]: I1122 10:57:16.727440 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/db52ce28-2f07-4bdf-8c24-d793e49104d8-dns-svc\") pod \"db52ce28-2f07-4bdf-8c24-d793e49104d8\" (UID: \"db52ce28-2f07-4bdf-8c24-d793e49104d8\") " Nov 22 10:57:16 crc kubenswrapper[4926]: I1122 10:57:16.727466 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/3b9d3be5-3a32-4378-aeb5-db92457a390f-horizon-secret-key\") pod \"3b9d3be5-3a32-4378-aeb5-db92457a390f\" (UID: \"3b9d3be5-3a32-4378-aeb5-db92457a390f\") " Nov 22 10:57:16 crc kubenswrapper[4926]: I1122 
10:57:16.727492 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6m7mn\" (UniqueName: \"kubernetes.io/projected/3b9d3be5-3a32-4378-aeb5-db92457a390f-kube-api-access-6m7mn\") pod \"3b9d3be5-3a32-4378-aeb5-db92457a390f\" (UID: \"3b9d3be5-3a32-4378-aeb5-db92457a390f\") " Nov 22 10:57:16 crc kubenswrapper[4926]: I1122 10:57:16.727527 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/048c5331-284b-4ab2-aace-dbba96b8bc01-httpd-run\") pod \"048c5331-284b-4ab2-aace-dbba96b8bc01\" (UID: \"048c5331-284b-4ab2-aace-dbba96b8bc01\") " Nov 22 10:57:16 crc kubenswrapper[4926]: I1122 10:57:16.727553 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/048c5331-284b-4ab2-aace-dbba96b8bc01-scripts\") pod \"048c5331-284b-4ab2-aace-dbba96b8bc01\" (UID: \"048c5331-284b-4ab2-aace-dbba96b8bc01\") " Nov 22 10:57:16 crc kubenswrapper[4926]: I1122 10:57:16.727784 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-v5p6s\" (UniqueName: \"kubernetes.io/projected/1ea354a7-08ce-41f2-8f72-ba6a0782c5f8-kube-api-access-v5p6s\") pod \"1ea354a7-08ce-41f2-8f72-ba6a0782c5f8\" (UID: \"1ea354a7-08ce-41f2-8f72-ba6a0782c5f8\") " Nov 22 10:57:16 crc kubenswrapper[4926]: I1122 10:57:16.727844 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/db52ce28-2f07-4bdf-8c24-d793e49104d8-ovsdbserver-nb\") pod \"db52ce28-2f07-4bdf-8c24-d793e49104d8\" (UID: \"db52ce28-2f07-4bdf-8c24-d793e49104d8\") " Nov 22 10:57:16 crc kubenswrapper[4926]: I1122 10:57:16.727883 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/1ea354a7-08ce-41f2-8f72-ba6a0782c5f8-logs\") pod \"1ea354a7-08ce-41f2-8f72-ba6a0782c5f8\" (UID: \"1ea354a7-08ce-41f2-8f72-ba6a0782c5f8\") " Nov 22 10:57:16 crc kubenswrapper[4926]: I1122 10:57:16.727936 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"048c5331-284b-4ab2-aace-dbba96b8bc01\" (UID: \"048c5331-284b-4ab2-aace-dbba96b8bc01\") " Nov 22 10:57:16 crc kubenswrapper[4926]: I1122 10:57:16.727965 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-crkz5\" (UniqueName: \"kubernetes.io/projected/048c5331-284b-4ab2-aace-dbba96b8bc01-kube-api-access-crkz5\") pod \"048c5331-284b-4ab2-aace-dbba96b8bc01\" (UID: \"048c5331-284b-4ab2-aace-dbba96b8bc01\") " Nov 22 10:57:16 crc kubenswrapper[4926]: I1122 10:57:16.728461 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3b9d3be5-3a32-4378-aeb5-db92457a390f-logs" (OuterVolumeSpecName: "logs") pod "3b9d3be5-3a32-4378-aeb5-db92457a390f" (UID: "3b9d3be5-3a32-4378-aeb5-db92457a390f"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 10:57:16 crc kubenswrapper[4926]: I1122 10:57:16.728900 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3b9d3be5-3a32-4378-aeb5-db92457a390f-config-data" (OuterVolumeSpecName: "config-data") pod "3b9d3be5-3a32-4378-aeb5-db92457a390f" (UID: "3b9d3be5-3a32-4378-aeb5-db92457a390f"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:57:16 crc kubenswrapper[4926]: I1122 10:57:16.730129 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1ea354a7-08ce-41f2-8f72-ba6a0782c5f8-logs" (OuterVolumeSpecName: "logs") pod "1ea354a7-08ce-41f2-8f72-ba6a0782c5f8" (UID: "1ea354a7-08ce-41f2-8f72-ba6a0782c5f8"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 10:57:16 crc kubenswrapper[4926]: I1122 10:57:16.731648 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1ea354a7-08ce-41f2-8f72-ba6a0782c5f8-scripts" (OuterVolumeSpecName: "scripts") pod "1ea354a7-08ce-41f2-8f72-ba6a0782c5f8" (UID: "1ea354a7-08ce-41f2-8f72-ba6a0782c5f8"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:57:16 crc kubenswrapper[4926]: I1122 10:57:16.732905 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/048c5331-284b-4ab2-aace-dbba96b8bc01-logs" (OuterVolumeSpecName: "logs") pod "048c5331-284b-4ab2-aace-dbba96b8bc01" (UID: "048c5331-284b-4ab2-aace-dbba96b8bc01"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 10:57:16 crc kubenswrapper[4926]: I1122 10:57:16.736301 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1ea354a7-08ce-41f2-8f72-ba6a0782c5f8-kube-api-access-v5p6s" (OuterVolumeSpecName: "kube-api-access-v5p6s") pod "1ea354a7-08ce-41f2-8f72-ba6a0782c5f8" (UID: "1ea354a7-08ce-41f2-8f72-ba6a0782c5f8"). InnerVolumeSpecName "kube-api-access-v5p6s". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:57:16 crc kubenswrapper[4926]: I1122 10:57:16.737327 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3b9d3be5-3a32-4378-aeb5-db92457a390f-scripts" (OuterVolumeSpecName: "scripts") pod "3b9d3be5-3a32-4378-aeb5-db92457a390f" (UID: "3b9d3be5-3a32-4378-aeb5-db92457a390f"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:57:16 crc kubenswrapper[4926]: I1122 10:57:16.737856 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/db52ce28-2f07-4bdf-8c24-d793e49104d8-kube-api-access-ssvdc" (OuterVolumeSpecName: "kube-api-access-ssvdc") pod "db52ce28-2f07-4bdf-8c24-d793e49104d8" (UID: "db52ce28-2f07-4bdf-8c24-d793e49104d8"). InnerVolumeSpecName "kube-api-access-ssvdc". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:57:16 crc kubenswrapper[4926]: I1122 10:57:16.738063 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/048c5331-284b-4ab2-aace-dbba96b8bc01-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "048c5331-284b-4ab2-aace-dbba96b8bc01" (UID: "048c5331-284b-4ab2-aace-dbba96b8bc01"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 10:57:16 crc kubenswrapper[4926]: I1122 10:57:16.738555 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1ea354a7-08ce-41f2-8f72-ba6a0782c5f8-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "1ea354a7-08ce-41f2-8f72-ba6a0782c5f8" (UID: "1ea354a7-08ce-41f2-8f72-ba6a0782c5f8"). InnerVolumeSpecName "httpd-run". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 10:57:16 crc kubenswrapper[4926]: I1122 10:57:16.742501 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage04-crc" (OuterVolumeSpecName: "glance") pod "048c5331-284b-4ab2-aace-dbba96b8bc01" (UID: "048c5331-284b-4ab2-aace-dbba96b8bc01"). InnerVolumeSpecName "local-storage04-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 22 10:57:16 crc kubenswrapper[4926]: I1122 10:57:16.743578 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage01-crc" (OuterVolumeSpecName: "glance") pod "1ea354a7-08ce-41f2-8f72-ba6a0782c5f8" (UID: "1ea354a7-08ce-41f2-8f72-ba6a0782c5f8"). InnerVolumeSpecName "local-storage01-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 22 10:57:16 crc kubenswrapper[4926]: I1122 10:57:16.747979 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/048c5331-284b-4ab2-aace-dbba96b8bc01-scripts" (OuterVolumeSpecName: "scripts") pod "048c5331-284b-4ab2-aace-dbba96b8bc01" (UID: "048c5331-284b-4ab2-aace-dbba96b8bc01"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:57:16 crc kubenswrapper[4926]: I1122 10:57:16.749308 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3b9d3be5-3a32-4378-aeb5-db92457a390f-kube-api-access-6m7mn" (OuterVolumeSpecName: "kube-api-access-6m7mn") pod "3b9d3be5-3a32-4378-aeb5-db92457a390f" (UID: "3b9d3be5-3a32-4378-aeb5-db92457a390f"). InnerVolumeSpecName "kube-api-access-6m7mn". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:57:16 crc kubenswrapper[4926]: I1122 10:57:16.751590 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/048c5331-284b-4ab2-aace-dbba96b8bc01-kube-api-access-crkz5" (OuterVolumeSpecName: "kube-api-access-crkz5") pod "048c5331-284b-4ab2-aace-dbba96b8bc01" (UID: "048c5331-284b-4ab2-aace-dbba96b8bc01"). InnerVolumeSpecName "kube-api-access-crkz5". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:57:16 crc kubenswrapper[4926]: I1122 10:57:16.763468 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3b9d3be5-3a32-4378-aeb5-db92457a390f-horizon-secret-key" (OuterVolumeSpecName: "horizon-secret-key") pod "3b9d3be5-3a32-4378-aeb5-db92457a390f" (UID: "3b9d3be5-3a32-4378-aeb5-db92457a390f"). InnerVolumeSpecName "horizon-secret-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:57:16 crc kubenswrapper[4926]: I1122 10:57:16.771068 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/048c5331-284b-4ab2-aace-dbba96b8bc01-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "048c5331-284b-4ab2-aace-dbba96b8bc01" (UID: "048c5331-284b-4ab2-aace-dbba96b8bc01"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:57:16 crc kubenswrapper[4926]: I1122 10:57:16.787467 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1ea354a7-08ce-41f2-8f72-ba6a0782c5f8-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "1ea354a7-08ce-41f2-8f72-ba6a0782c5f8" (UID: "1ea354a7-08ce-41f2-8f72-ba6a0782c5f8"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:57:16 crc kubenswrapper[4926]: I1122 10:57:16.789984 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/db52ce28-2f07-4bdf-8c24-d793e49104d8-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "db52ce28-2f07-4bdf-8c24-d793e49104d8" (UID: "db52ce28-2f07-4bdf-8c24-d793e49104d8"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:57:16 crc kubenswrapper[4926]: I1122 10:57:16.791204 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/048c5331-284b-4ab2-aace-dbba96b8bc01-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "048c5331-284b-4ab2-aace-dbba96b8bc01" (UID: "048c5331-284b-4ab2-aace-dbba96b8bc01"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:57:16 crc kubenswrapper[4926]: I1122 10:57:16.791574 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/db52ce28-2f07-4bdf-8c24-d793e49104d8-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "db52ce28-2f07-4bdf-8c24-d793e49104d8" (UID: "db52ce28-2f07-4bdf-8c24-d793e49104d8"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:57:16 crc kubenswrapper[4926]: I1122 10:57:16.794205 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/db52ce28-2f07-4bdf-8c24-d793e49104d8-config" (OuterVolumeSpecName: "config") pod "db52ce28-2f07-4bdf-8c24-d793e49104d8" (UID: "db52ce28-2f07-4bdf-8c24-d793e49104d8"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:57:16 crc kubenswrapper[4926]: I1122 10:57:16.804743 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/048c5331-284b-4ab2-aace-dbba96b8bc01-config-data" (OuterVolumeSpecName: "config-data") pod "048c5331-284b-4ab2-aace-dbba96b8bc01" (UID: "048c5331-284b-4ab2-aace-dbba96b8bc01"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:57:16 crc kubenswrapper[4926]: I1122 10:57:16.806393 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/db52ce28-2f07-4bdf-8c24-d793e49104d8-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "db52ce28-2f07-4bdf-8c24-d793e49104d8" (UID: "db52ce28-2f07-4bdf-8c24-d793e49104d8"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:57:16 crc kubenswrapper[4926]: I1122 10:57:16.809043 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1ea354a7-08ce-41f2-8f72-ba6a0782c5f8-config-data" (OuterVolumeSpecName: "config-data") pod "1ea354a7-08ce-41f2-8f72-ba6a0782c5f8" (UID: "1ea354a7-08ce-41f2-8f72-ba6a0782c5f8"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:57:16 crc kubenswrapper[4926]: I1122 10:57:16.810592 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1ea354a7-08ce-41f2-8f72-ba6a0782c5f8-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "1ea354a7-08ce-41f2-8f72-ba6a0782c5f8" (UID: "1ea354a7-08ce-41f2-8f72-ba6a0782c5f8"). InnerVolumeSpecName "public-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:57:16 crc kubenswrapper[4926]: I1122 10:57:16.815648 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/db52ce28-2f07-4bdf-8c24-d793e49104d8-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "db52ce28-2f07-4bdf-8c24-d793e49104d8" (UID: "db52ce28-2f07-4bdf-8c24-d793e49104d8"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:57:16 crc kubenswrapper[4926]: I1122 10:57:16.829855 4926 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/db52ce28-2f07-4bdf-8c24-d793e49104d8-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 22 10:57:16 crc kubenswrapper[4926]: I1122 10:57:16.829903 4926 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/1ea354a7-08ce-41f2-8f72-ba6a0782c5f8-logs\") on node \"crc\" DevicePath \"\"" Nov 22 10:57:16 crc kubenswrapper[4926]: I1122 10:57:16.829936 4926 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") on node \"crc\" " Nov 22 10:57:16 crc kubenswrapper[4926]: I1122 10:57:16.829949 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-crkz5\" (UniqueName: \"kubernetes.io/projected/048c5331-284b-4ab2-aace-dbba96b8bc01-kube-api-access-crkz5\") on node \"crc\" DevicePath \"\"" Nov 22 10:57:16 crc kubenswrapper[4926]: I1122 10:57:16.829963 4926 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1ea354a7-08ce-41f2-8f72-ba6a0782c5f8-scripts\") on node \"crc\" DevicePath \"\"" Nov 22 10:57:16 crc kubenswrapper[4926]: I1122 10:57:16.829974 4926 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/db52ce28-2f07-4bdf-8c24-d793e49104d8-config\") on node \"crc\" DevicePath \"\"" Nov 22 10:57:16 crc kubenswrapper[4926]: I1122 10:57:16.829985 4926 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3b9d3be5-3a32-4378-aeb5-db92457a390f-logs\") on node \"crc\" DevicePath \"\"" Nov 22 10:57:16 crc kubenswrapper[4926]: I1122 10:57:16.829996 4926 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/3b9d3be5-3a32-4378-aeb5-db92457a390f-config-data\") on node \"crc\" DevicePath \"\"" Nov 22 10:57:16 crc kubenswrapper[4926]: I1122 10:57:16.830006 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ssvdc\" (UniqueName: \"kubernetes.io/projected/db52ce28-2f07-4bdf-8c24-d793e49104d8-kube-api-access-ssvdc\") on node \"crc\" DevicePath \"\"" Nov 22 10:57:16 crc kubenswrapper[4926]: I1122 10:57:16.830016 4926 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/1ea354a7-08ce-41f2-8f72-ba6a0782c5f8-public-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 22 10:57:16 crc kubenswrapper[4926]: I1122 10:57:16.830027 4926 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1ea354a7-08ce-41f2-8f72-ba6a0782c5f8-config-data\") on node \"crc\" DevicePath \"\"" Nov 22 10:57:16 crc kubenswrapper[4926]: I1122 10:57:16.830038 4926 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: 
\"kubernetes.io/configmap/db52ce28-2f07-4bdf-8c24-d793e49104d8-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Nov 22 10:57:16 crc kubenswrapper[4926]: I1122 10:57:16.830051 4926 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/048c5331-284b-4ab2-aace-dbba96b8bc01-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 22 10:57:16 crc kubenswrapper[4926]: I1122 10:57:16.830062 4926 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/048c5331-284b-4ab2-aace-dbba96b8bc01-config-data\") on node \"crc\" DevicePath \"\"" Nov 22 10:57:16 crc kubenswrapper[4926]: I1122 10:57:16.830073 4926 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/1ea354a7-08ce-41f2-8f72-ba6a0782c5f8-httpd-run\") on node \"crc\" DevicePath \"\"" Nov 22 10:57:16 crc kubenswrapper[4926]: I1122 10:57:16.830083 4926 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/db52ce28-2f07-4bdf-8c24-d793e49104d8-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 22 10:57:16 crc kubenswrapper[4926]: I1122 10:57:16.830093 4926 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/048c5331-284b-4ab2-aace-dbba96b8bc01-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 22 10:57:16 crc kubenswrapper[4926]: I1122 10:57:16.830104 4926 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1ea354a7-08ce-41f2-8f72-ba6a0782c5f8-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 22 10:57:16 crc kubenswrapper[4926]: I1122 10:57:16.830122 4926 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") on node \"crc\" " Nov 22 10:57:16 crc kubenswrapper[4926]: I1122 10:57:16.830133 4926 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/048c5331-284b-4ab2-aace-dbba96b8bc01-logs\") on node \"crc\" DevicePath \"\"" Nov 22 10:57:16 crc kubenswrapper[4926]: I1122 10:57:16.830144 4926 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/3b9d3be5-3a32-4378-aeb5-db92457a390f-scripts\") on node \"crc\" DevicePath \"\"" Nov 22 10:57:16 crc kubenswrapper[4926]: I1122 10:57:16.830154 4926 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/db52ce28-2f07-4bdf-8c24-d793e49104d8-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 22 10:57:16 crc kubenswrapper[4926]: I1122 10:57:16.830162 4926 reconciler_common.go:293] "Volume detached for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/3b9d3be5-3a32-4378-aeb5-db92457a390f-horizon-secret-key\") on node \"crc\" DevicePath \"\"" Nov 22 10:57:16 crc kubenswrapper[4926]: I1122 10:57:16.830170 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6m7mn\" (UniqueName: \"kubernetes.io/projected/3b9d3be5-3a32-4378-aeb5-db92457a390f-kube-api-access-6m7mn\") on node \"crc\" DevicePath \"\"" Nov 22 10:57:16 crc kubenswrapper[4926]: I1122 10:57:16.830179 4926 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/048c5331-284b-4ab2-aace-dbba96b8bc01-httpd-run\") on node \"crc\" DevicePath \"\"" Nov 22 
10:57:16 crc kubenswrapper[4926]: I1122 10:57:16.830187 4926 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/048c5331-284b-4ab2-aace-dbba96b8bc01-scripts\") on node \"crc\" DevicePath \"\"" Nov 22 10:57:16 crc kubenswrapper[4926]: I1122 10:57:16.830194 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-v5p6s\" (UniqueName: \"kubernetes.io/projected/1ea354a7-08ce-41f2-8f72-ba6a0782c5f8-kube-api-access-v5p6s\") on node \"crc\" DevicePath \"\"" Nov 22 10:57:16 crc kubenswrapper[4926]: I1122 10:57:16.846869 4926 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage04-crc" (UniqueName: "kubernetes.io/local-volume/local-storage04-crc") on node "crc" Nov 22 10:57:16 crc kubenswrapper[4926]: I1122 10:57:16.853260 4926 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage01-crc" (UniqueName: "kubernetes.io/local-volume/local-storage01-crc") on node "crc" Nov 22 10:57:16 crc kubenswrapper[4926]: I1122 10:57:16.931512 4926 reconciler_common.go:293] "Volume detached for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") on node \"crc\" DevicePath \"\"" Nov 22 10:57:16 crc kubenswrapper[4926]: I1122 10:57:16.931568 4926 reconciler_common.go:293] "Volume detached for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") on node \"crc\" DevicePath \"\"" Nov 22 10:57:17 crc kubenswrapper[4926]: I1122 10:57:17.177769 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-895cf5cf-qk577" podUID="db52ce28-2f07-4bdf-8c24-d793e49104d8" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.127:5353: i/o timeout" Nov 22 10:57:17 crc kubenswrapper[4926]: I1122 10:57:17.177866 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-895cf5cf-qk577" Nov 22 10:57:17 crc kubenswrapper[4926]: I1122 10:57:17.440140 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"1ea354a7-08ce-41f2-8f72-ba6a0782c5f8","Type":"ContainerDied","Data":"0b6b1c0a1b96d6c49bf5912b55e2aa3301c1cc5a0955b36157889a7ec04210d7"} Nov 22 10:57:17 crc kubenswrapper[4926]: I1122 10:57:17.440208 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 22 10:57:17 crc kubenswrapper[4926]: I1122 10:57:17.440264 4926 scope.go:117] "RemoveContainer" containerID="2bbf6a03c399448ebf8008a8ecdfb84b2491bf9810dee4ba7b3c78ab7a3c3c38" Nov 22 10:57:17 crc kubenswrapper[4926]: I1122 10:57:17.443273 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"048c5331-284b-4ab2-aace-dbba96b8bc01","Type":"ContainerDied","Data":"d406f6ca33481c0a1f5a54ed69dcfc47414696b8221fc46957799064e710eaa8"} Nov 22 10:57:17 crc kubenswrapper[4926]: I1122 10:57:17.443423 4926 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 22 10:57:17 crc kubenswrapper[4926]: I1122 10:57:17.451298 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-5b8ff8d89f-n9mb9" event={"ID":"3b9d3be5-3a32-4378-aeb5-db92457a390f","Type":"ContainerDied","Data":"f1f38623089c6ddf2dca3492c4e0c0f9b418dbf9332fe83ab1ca8126037ef4b4"} Nov 22 10:57:17 crc kubenswrapper[4926]: I1122 10:57:17.451457 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-5b8ff8d89f-n9mb9" Nov 22 10:57:17 crc kubenswrapper[4926]: I1122 10:57:17.463283 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-895cf5cf-qk577" event={"ID":"db52ce28-2f07-4bdf-8c24-d793e49104d8","Type":"ContainerDied","Data":"a598667724c79f020a465ab929a93efce9a89703415e680fb164bb1af0810214"} Nov 22 10:57:17 crc kubenswrapper[4926]: I1122 10:57:17.464190 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-895cf5cf-qk577" Nov 22 10:57:17 crc kubenswrapper[4926]: E1122 10:57:17.474097 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"barbican-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-barbican-api:current-podified\\\"\"" pod="openstack/barbican-db-sync-bw5rr" podUID="05bbddb1-c370-4805-9f91-373535d67f52" Nov 22 10:57:17 crc kubenswrapper[4926]: I1122 10:57:17.512739 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 22 10:57:17 crc kubenswrapper[4926]: I1122 10:57:17.536427 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 22 10:57:17 crc kubenswrapper[4926]: I1122 10:57:17.561304 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 22 10:57:17 crc kubenswrapper[4926]: I1122 10:57:17.568154 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 22 10:57:17 crc kubenswrapper[4926]: I1122 10:57:17.576963 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-external-api-0"] Nov 22 10:57:17 crc kubenswrapper[4926]: E1122 10:57:17.577438 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="db52ce28-2f07-4bdf-8c24-d793e49104d8" containerName="dnsmasq-dns" Nov 22 10:57:17 crc kubenswrapper[4926]: I1122 10:57:17.577461 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="db52ce28-2f07-4bdf-8c24-d793e49104d8" containerName="dnsmasq-dns" Nov 22 10:57:17 crc kubenswrapper[4926]: E1122 10:57:17.577485 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="db52ce28-2f07-4bdf-8c24-d793e49104d8" containerName="init" Nov 22 10:57:17 crc kubenswrapper[4926]: I1122 10:57:17.577493 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="db52ce28-2f07-4bdf-8c24-d793e49104d8" containerName="init" Nov 22 10:57:17 crc kubenswrapper[4926]: E1122 10:57:17.577516 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1ea354a7-08ce-41f2-8f72-ba6a0782c5f8" containerName="glance-log" Nov 22 10:57:17 crc kubenswrapper[4926]: I1122 10:57:17.577524 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="1ea354a7-08ce-41f2-8f72-ba6a0782c5f8" containerName="glance-log" Nov 22 10:57:17 crc kubenswrapper[4926]: E1122 10:57:17.577540 4926 cpu_manager.go:410] "RemoveStaleState: removing 
container" podUID="1ea354a7-08ce-41f2-8f72-ba6a0782c5f8" containerName="glance-httpd" Nov 22 10:57:17 crc kubenswrapper[4926]: I1122 10:57:17.577549 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="1ea354a7-08ce-41f2-8f72-ba6a0782c5f8" containerName="glance-httpd" Nov 22 10:57:17 crc kubenswrapper[4926]: E1122 10:57:17.577567 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="048c5331-284b-4ab2-aace-dbba96b8bc01" containerName="glance-log" Nov 22 10:57:17 crc kubenswrapper[4926]: I1122 10:57:17.577574 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="048c5331-284b-4ab2-aace-dbba96b8bc01" containerName="glance-log" Nov 22 10:57:17 crc kubenswrapper[4926]: E1122 10:57:17.577604 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="048c5331-284b-4ab2-aace-dbba96b8bc01" containerName="glance-httpd" Nov 22 10:57:17 crc kubenswrapper[4926]: I1122 10:57:17.577612 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="048c5331-284b-4ab2-aace-dbba96b8bc01" containerName="glance-httpd" Nov 22 10:57:17 crc kubenswrapper[4926]: I1122 10:57:17.577817 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="1ea354a7-08ce-41f2-8f72-ba6a0782c5f8" containerName="glance-httpd" Nov 22 10:57:17 crc kubenswrapper[4926]: I1122 10:57:17.577832 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="048c5331-284b-4ab2-aace-dbba96b8bc01" containerName="glance-httpd" Nov 22 10:57:17 crc kubenswrapper[4926]: I1122 10:57:17.577843 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="db52ce28-2f07-4bdf-8c24-d793e49104d8" containerName="dnsmasq-dns" Nov 22 10:57:17 crc kubenswrapper[4926]: I1122 10:57:17.577860 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="1ea354a7-08ce-41f2-8f72-ba6a0782c5f8" containerName="glance-log" Nov 22 10:57:17 crc kubenswrapper[4926]: I1122 10:57:17.577873 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="048c5331-284b-4ab2-aace-dbba96b8bc01" containerName="glance-log" Nov 22 10:57:17 crc kubenswrapper[4926]: I1122 10:57:17.579072 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 22 10:57:17 crc kubenswrapper[4926]: I1122 10:57:17.583058 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-scripts" Nov 22 10:57:17 crc kubenswrapper[4926]: I1122 10:57:17.584118 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-external-config-data" Nov 22 10:57:17 crc kubenswrapper[4926]: I1122 10:57:17.584180 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-glance-dockercfg-c98zk" Nov 22 10:57:17 crc kubenswrapper[4926]: I1122 10:57:17.584374 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-public-svc" Nov 22 10:57:17 crc kubenswrapper[4926]: I1122 10:57:17.589866 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 22 10:57:17 crc kubenswrapper[4926]: I1122 10:57:17.592092 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 22 10:57:17 crc kubenswrapper[4926]: I1122 10:57:17.595345 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-internal-svc" Nov 22 10:57:17 crc kubenswrapper[4926]: I1122 10:57:17.596666 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-internal-config-data" Nov 22 10:57:17 crc kubenswrapper[4926]: I1122 10:57:17.598479 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 22 10:57:17 crc kubenswrapper[4926]: I1122 10:57:17.641178 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-5b8ff8d89f-n9mb9"] Nov 22 10:57:17 crc kubenswrapper[4926]: I1122 10:57:17.649477 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/horizon-5b8ff8d89f-n9mb9"] Nov 22 10:57:17 crc kubenswrapper[4926]: I1122 10:57:17.658953 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 22 10:57:17 crc kubenswrapper[4926]: I1122 10:57:17.667212 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-895cf5cf-qk577"] Nov 22 10:57:17 crc kubenswrapper[4926]: I1122 10:57:17.675877 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-895cf5cf-qk577"] Nov 22 10:57:17 crc kubenswrapper[4926]: I1122 10:57:17.745876 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/59000497-7fb0-496d-afca-21b04b8d59e4-config-data\") pod \"glance-default-external-api-0\" (UID: \"59000497-7fb0-496d-afca-21b04b8d59e4\") " pod="openstack/glance-default-external-api-0" Nov 22 10:57:17 crc kubenswrapper[4926]: I1122 10:57:17.746044 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/59000497-7fb0-496d-afca-21b04b8d59e4-scripts\") pod \"glance-default-external-api-0\" (UID: \"59000497-7fb0-496d-afca-21b04b8d59e4\") " pod="openstack/glance-default-external-api-0" Nov 22 10:57:17 crc kubenswrapper[4926]: I1122 10:57:17.746124 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/59000497-7fb0-496d-afca-21b04b8d59e4-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"59000497-7fb0-496d-afca-21b04b8d59e4\") " pod="openstack/glance-default-external-api-0" Nov 22 10:57:17 crc kubenswrapper[4926]: I1122 10:57:17.746163 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/32257092-e014-4913-99e3-a92b522301e2-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"32257092-e014-4913-99e3-a92b522301e2\") " pod="openstack/glance-default-internal-api-0" Nov 22 10:57:17 crc kubenswrapper[4926]: I1122 10:57:17.746193 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qp4hb\" (UniqueName: \"kubernetes.io/projected/32257092-e014-4913-99e3-a92b522301e2-kube-api-access-qp4hb\") pod \"glance-default-internal-api-0\" (UID: \"32257092-e014-4913-99e3-a92b522301e2\") " pod="openstack/glance-default-internal-api-0" Nov 22 10:57:17 crc kubenswrapper[4926]: I1122 10:57:17.746211 4926 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rm4zg\" (UniqueName: \"kubernetes.io/projected/59000497-7fb0-496d-afca-21b04b8d59e4-kube-api-access-rm4zg\") pod \"glance-default-external-api-0\" (UID: \"59000497-7fb0-496d-afca-21b04b8d59e4\") " pod="openstack/glance-default-external-api-0" Nov 22 10:57:17 crc kubenswrapper[4926]: I1122 10:57:17.746233 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/32257092-e014-4913-99e3-a92b522301e2-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"32257092-e014-4913-99e3-a92b522301e2\") " pod="openstack/glance-default-internal-api-0" Nov 22 10:57:17 crc kubenswrapper[4926]: I1122 10:57:17.746290 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/32257092-e014-4913-99e3-a92b522301e2-logs\") pod \"glance-default-internal-api-0\" (UID: \"32257092-e014-4913-99e3-a92b522301e2\") " pod="openstack/glance-default-internal-api-0" Nov 22 10:57:17 crc kubenswrapper[4926]: I1122 10:57:17.746372 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/32257092-e014-4913-99e3-a92b522301e2-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"32257092-e014-4913-99e3-a92b522301e2\") " pod="openstack/glance-default-internal-api-0" Nov 22 10:57:17 crc kubenswrapper[4926]: I1122 10:57:17.747111 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/32257092-e014-4913-99e3-a92b522301e2-scripts\") pod \"glance-default-internal-api-0\" (UID: \"32257092-e014-4913-99e3-a92b522301e2\") " pod="openstack/glance-default-internal-api-0" Nov 22 10:57:17 crc kubenswrapper[4926]: I1122 10:57:17.747156 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"glance-default-internal-api-0\" (UID: \"32257092-e014-4913-99e3-a92b522301e2\") " pod="openstack/glance-default-internal-api-0" Nov 22 10:57:17 crc kubenswrapper[4926]: I1122 10:57:17.747216 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/59000497-7fb0-496d-afca-21b04b8d59e4-logs\") pod \"glance-default-external-api-0\" (UID: \"59000497-7fb0-496d-afca-21b04b8d59e4\") " pod="openstack/glance-default-external-api-0" Nov 22 10:57:17 crc kubenswrapper[4926]: I1122 10:57:17.747248 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/59000497-7fb0-496d-afca-21b04b8d59e4-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"59000497-7fb0-496d-afca-21b04b8d59e4\") " pod="openstack/glance-default-external-api-0" Nov 22 10:57:17 crc kubenswrapper[4926]: I1122 10:57:17.747391 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"glance-default-external-api-0\" (UID: \"59000497-7fb0-496d-afca-21b04b8d59e4\") " pod="openstack/glance-default-external-api-0" Nov 22 10:57:17 crc kubenswrapper[4926]: I1122 
10:57:17.747451 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/59000497-7fb0-496d-afca-21b04b8d59e4-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"59000497-7fb0-496d-afca-21b04b8d59e4\") " pod="openstack/glance-default-external-api-0" Nov 22 10:57:17 crc kubenswrapper[4926]: I1122 10:57:17.747488 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/32257092-e014-4913-99e3-a92b522301e2-config-data\") pod \"glance-default-internal-api-0\" (UID: \"32257092-e014-4913-99e3-a92b522301e2\") " pod="openstack/glance-default-internal-api-0" Nov 22 10:57:17 crc kubenswrapper[4926]: I1122 10:57:17.850153 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/59000497-7fb0-496d-afca-21b04b8d59e4-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"59000497-7fb0-496d-afca-21b04b8d59e4\") " pod="openstack/glance-default-external-api-0" Nov 22 10:57:17 crc kubenswrapper[4926]: I1122 10:57:17.850203 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/32257092-e014-4913-99e3-a92b522301e2-config-data\") pod \"glance-default-internal-api-0\" (UID: \"32257092-e014-4913-99e3-a92b522301e2\") " pod="openstack/glance-default-internal-api-0" Nov 22 10:57:17 crc kubenswrapper[4926]: I1122 10:57:17.850227 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/59000497-7fb0-496d-afca-21b04b8d59e4-config-data\") pod \"glance-default-external-api-0\" (UID: \"59000497-7fb0-496d-afca-21b04b8d59e4\") " pod="openstack/glance-default-external-api-0" Nov 22 10:57:17 crc kubenswrapper[4926]: I1122 10:57:17.850250 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/59000497-7fb0-496d-afca-21b04b8d59e4-scripts\") pod \"glance-default-external-api-0\" (UID: \"59000497-7fb0-496d-afca-21b04b8d59e4\") " pod="openstack/glance-default-external-api-0" Nov 22 10:57:17 crc kubenswrapper[4926]: I1122 10:57:17.850273 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/59000497-7fb0-496d-afca-21b04b8d59e4-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"59000497-7fb0-496d-afca-21b04b8d59e4\") " pod="openstack/glance-default-external-api-0" Nov 22 10:57:17 crc kubenswrapper[4926]: I1122 10:57:17.850292 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/32257092-e014-4913-99e3-a92b522301e2-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"32257092-e014-4913-99e3-a92b522301e2\") " pod="openstack/glance-default-internal-api-0" Nov 22 10:57:17 crc kubenswrapper[4926]: I1122 10:57:17.850313 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qp4hb\" (UniqueName: \"kubernetes.io/projected/32257092-e014-4913-99e3-a92b522301e2-kube-api-access-qp4hb\") pod \"glance-default-internal-api-0\" (UID: \"32257092-e014-4913-99e3-a92b522301e2\") " pod="openstack/glance-default-internal-api-0" Nov 22 10:57:17 crc kubenswrapper[4926]: I1122 10:57:17.850331 4926 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rm4zg\" (UniqueName: \"kubernetes.io/projected/59000497-7fb0-496d-afca-21b04b8d59e4-kube-api-access-rm4zg\") pod \"glance-default-external-api-0\" (UID: \"59000497-7fb0-496d-afca-21b04b8d59e4\") " pod="openstack/glance-default-external-api-0" Nov 22 10:57:17 crc kubenswrapper[4926]: I1122 10:57:17.850348 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/32257092-e014-4913-99e3-a92b522301e2-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"32257092-e014-4913-99e3-a92b522301e2\") " pod="openstack/glance-default-internal-api-0" Nov 22 10:57:17 crc kubenswrapper[4926]: I1122 10:57:17.850376 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/32257092-e014-4913-99e3-a92b522301e2-logs\") pod \"glance-default-internal-api-0\" (UID: \"32257092-e014-4913-99e3-a92b522301e2\") " pod="openstack/glance-default-internal-api-0" Nov 22 10:57:17 crc kubenswrapper[4926]: I1122 10:57:17.850414 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/32257092-e014-4913-99e3-a92b522301e2-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"32257092-e014-4913-99e3-a92b522301e2\") " pod="openstack/glance-default-internal-api-0" Nov 22 10:57:17 crc kubenswrapper[4926]: I1122 10:57:17.850446 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/32257092-e014-4913-99e3-a92b522301e2-scripts\") pod \"glance-default-internal-api-0\" (UID: \"32257092-e014-4913-99e3-a92b522301e2\") " pod="openstack/glance-default-internal-api-0" Nov 22 10:57:17 crc kubenswrapper[4926]: I1122 10:57:17.850462 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"glance-default-internal-api-0\" (UID: \"32257092-e014-4913-99e3-a92b522301e2\") " pod="openstack/glance-default-internal-api-0" Nov 22 10:57:17 crc kubenswrapper[4926]: I1122 10:57:17.850484 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/59000497-7fb0-496d-afca-21b04b8d59e4-logs\") pod \"glance-default-external-api-0\" (UID: \"59000497-7fb0-496d-afca-21b04b8d59e4\") " pod="openstack/glance-default-external-api-0" Nov 22 10:57:17 crc kubenswrapper[4926]: I1122 10:57:17.850501 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/59000497-7fb0-496d-afca-21b04b8d59e4-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"59000497-7fb0-496d-afca-21b04b8d59e4\") " pod="openstack/glance-default-external-api-0" Nov 22 10:57:17 crc kubenswrapper[4926]: I1122 10:57:17.850552 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"glance-default-external-api-0\" (UID: \"59000497-7fb0-496d-afca-21b04b8d59e4\") " pod="openstack/glance-default-external-api-0" Nov 22 10:57:17 crc kubenswrapper[4926]: I1122 10:57:17.850783 4926 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage01-crc\" (UniqueName: 
\"kubernetes.io/local-volume/local-storage01-crc\") pod \"glance-default-external-api-0\" (UID: \"59000497-7fb0-496d-afca-21b04b8d59e4\") device mount path \"/mnt/openstack/pv01\"" pod="openstack/glance-default-external-api-0" Nov 22 10:57:17 crc kubenswrapper[4926]: I1122 10:57:17.853987 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/59000497-7fb0-496d-afca-21b04b8d59e4-logs\") pod \"glance-default-external-api-0\" (UID: \"59000497-7fb0-496d-afca-21b04b8d59e4\") " pod="openstack/glance-default-external-api-0" Nov 22 10:57:17 crc kubenswrapper[4926]: I1122 10:57:17.854050 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/59000497-7fb0-496d-afca-21b04b8d59e4-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"59000497-7fb0-496d-afca-21b04b8d59e4\") " pod="openstack/glance-default-external-api-0" Nov 22 10:57:17 crc kubenswrapper[4926]: I1122 10:57:17.854271 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/32257092-e014-4913-99e3-a92b522301e2-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"32257092-e014-4913-99e3-a92b522301e2\") " pod="openstack/glance-default-internal-api-0" Nov 22 10:57:17 crc kubenswrapper[4926]: I1122 10:57:17.854292 4926 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"glance-default-internal-api-0\" (UID: \"32257092-e014-4913-99e3-a92b522301e2\") device mount path \"/mnt/openstack/pv04\"" pod="openstack/glance-default-internal-api-0" Nov 22 10:57:17 crc kubenswrapper[4926]: I1122 10:57:17.854640 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/32257092-e014-4913-99e3-a92b522301e2-logs\") pod \"glance-default-internal-api-0\" (UID: \"32257092-e014-4913-99e3-a92b522301e2\") " pod="openstack/glance-default-internal-api-0" Nov 22 10:57:17 crc kubenswrapper[4926]: I1122 10:57:17.859856 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/32257092-e014-4913-99e3-a92b522301e2-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"32257092-e014-4913-99e3-a92b522301e2\") " pod="openstack/glance-default-internal-api-0" Nov 22 10:57:17 crc kubenswrapper[4926]: I1122 10:57:17.859908 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/59000497-7fb0-496d-afca-21b04b8d59e4-scripts\") pod \"glance-default-external-api-0\" (UID: \"59000497-7fb0-496d-afca-21b04b8d59e4\") " pod="openstack/glance-default-external-api-0" Nov 22 10:57:17 crc kubenswrapper[4926]: I1122 10:57:17.859858 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/59000497-7fb0-496d-afca-21b04b8d59e4-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"59000497-7fb0-496d-afca-21b04b8d59e4\") " pod="openstack/glance-default-external-api-0" Nov 22 10:57:17 crc kubenswrapper[4926]: I1122 10:57:17.860140 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/59000497-7fb0-496d-afca-21b04b8d59e4-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: 
\"59000497-7fb0-496d-afca-21b04b8d59e4\") " pod="openstack/glance-default-external-api-0" Nov 22 10:57:17 crc kubenswrapper[4926]: I1122 10:57:17.872922 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rm4zg\" (UniqueName: \"kubernetes.io/projected/59000497-7fb0-496d-afca-21b04b8d59e4-kube-api-access-rm4zg\") pod \"glance-default-external-api-0\" (UID: \"59000497-7fb0-496d-afca-21b04b8d59e4\") " pod="openstack/glance-default-external-api-0" Nov 22 10:57:17 crc kubenswrapper[4926]: I1122 10:57:17.874033 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/32257092-e014-4913-99e3-a92b522301e2-config-data\") pod \"glance-default-internal-api-0\" (UID: \"32257092-e014-4913-99e3-a92b522301e2\") " pod="openstack/glance-default-internal-api-0" Nov 22 10:57:17 crc kubenswrapper[4926]: I1122 10:57:17.874634 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/59000497-7fb0-496d-afca-21b04b8d59e4-config-data\") pod \"glance-default-external-api-0\" (UID: \"59000497-7fb0-496d-afca-21b04b8d59e4\") " pod="openstack/glance-default-external-api-0" Nov 22 10:57:17 crc kubenswrapper[4926]: I1122 10:57:17.877796 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/32257092-e014-4913-99e3-a92b522301e2-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"32257092-e014-4913-99e3-a92b522301e2\") " pod="openstack/glance-default-internal-api-0" Nov 22 10:57:17 crc kubenswrapper[4926]: I1122 10:57:17.878216 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/32257092-e014-4913-99e3-a92b522301e2-scripts\") pod \"glance-default-internal-api-0\" (UID: \"32257092-e014-4913-99e3-a92b522301e2\") " pod="openstack/glance-default-internal-api-0" Nov 22 10:57:17 crc kubenswrapper[4926]: I1122 10:57:17.881535 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qp4hb\" (UniqueName: \"kubernetes.io/projected/32257092-e014-4913-99e3-a92b522301e2-kube-api-access-qp4hb\") pod \"glance-default-internal-api-0\" (UID: \"32257092-e014-4913-99e3-a92b522301e2\") " pod="openstack/glance-default-internal-api-0" Nov 22 10:57:17 crc kubenswrapper[4926]: I1122 10:57:17.902853 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"glance-default-external-api-0\" (UID: \"59000497-7fb0-496d-afca-21b04b8d59e4\") " pod="openstack/glance-default-external-api-0" Nov 22 10:57:17 crc kubenswrapper[4926]: I1122 10:57:17.916735 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"glance-default-internal-api-0\" (UID: \"32257092-e014-4913-99e3-a92b522301e2\") " pod="openstack/glance-default-internal-api-0" Nov 22 10:57:18 crc kubenswrapper[4926]: I1122 10:57:18.197953 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 22 10:57:18 crc kubenswrapper[4926]: I1122 10:57:18.216780 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 22 10:57:18 crc kubenswrapper[4926]: I1122 10:57:18.594076 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="048c5331-284b-4ab2-aace-dbba96b8bc01" path="/var/lib/kubelet/pods/048c5331-284b-4ab2-aace-dbba96b8bc01/volumes" Nov 22 10:57:18 crc kubenswrapper[4926]: I1122 10:57:18.595176 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1ea354a7-08ce-41f2-8f72-ba6a0782c5f8" path="/var/lib/kubelet/pods/1ea354a7-08ce-41f2-8f72-ba6a0782c5f8/volumes" Nov 22 10:57:18 crc kubenswrapper[4926]: I1122 10:57:18.596021 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3b9d3be5-3a32-4378-aeb5-db92457a390f" path="/var/lib/kubelet/pods/3b9d3be5-3a32-4378-aeb5-db92457a390f/volumes" Nov 22 10:57:18 crc kubenswrapper[4926]: I1122 10:57:18.597225 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cea79410-ad06-4a0e-9227-20e516784c04" path="/var/lib/kubelet/pods/cea79410-ad06-4a0e-9227-20e516784c04/volumes" Nov 22 10:57:18 crc kubenswrapper[4926]: I1122 10:57:18.597647 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="db52ce28-2f07-4bdf-8c24-d793e49104d8" path="/var/lib/kubelet/pods/db52ce28-2f07-4bdf-8c24-d793e49104d8/volumes" Nov 22 10:57:18 crc kubenswrapper[4926]: E1122 10:57:18.673589 4926 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-cinder-api:current-podified" Nov 22 10:57:18 crc kubenswrapper[4926]: E1122 10:57:18.673777 4926 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:cinder-db-sync,Image:quay.io/podified-antelope-centos9/openstack-cinder-api:current-podified,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_set_configs && 
/usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:TRUE,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:etc-machine-id,ReadOnly:true,MountPath:/etc/machine-id,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:scripts,ReadOnly:true,MountPath:/usr/local/bin/container-scripts,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/config-data/merged,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/my.cnf,SubPath:my.cnf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:db-sync-config-data,ReadOnly:true,MountPath:/etc/cinder/cinder.conf.d,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:db-sync-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-x668t,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:*0,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod cinder-db-sync-fh2gk_openstack(63e63df0-e7ff-46a2-9b1d-60be115851ce): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 22 10:57:18 crc kubenswrapper[4926]: E1122 10:57:18.675571 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cinder-db-sync\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/cinder-db-sync-fh2gk" podUID="63e63df0-e7ff-46a2-9b1d-60be115851ce" Nov 22 10:57:18 crc kubenswrapper[4926]: I1122 10:57:18.718036 4926 scope.go:117] "RemoveContainer" containerID="98e2565e5a641e10d3f74128c1ddee908ef25e86c014b0823efa24c0112cf79c" Nov 22 10:57:18 crc kubenswrapper[4926]: I1122 10:57:18.744522 4926 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-659f95cfc-dgn4q" Nov 22 10:57:18 crc kubenswrapper[4926]: I1122 10:57:18.870588 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/29fecf58-02ca-475e-9394-6205a9cdc086-logs\") pod \"29fecf58-02ca-475e-9394-6205a9cdc086\" (UID: \"29fecf58-02ca-475e-9394-6205a9cdc086\") " Nov 22 10:57:18 crc kubenswrapper[4926]: I1122 10:57:18.870647 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/29fecf58-02ca-475e-9394-6205a9cdc086-scripts\") pod \"29fecf58-02ca-475e-9394-6205a9cdc086\" (UID: \"29fecf58-02ca-475e-9394-6205a9cdc086\") " Nov 22 10:57:18 crc kubenswrapper[4926]: I1122 10:57:18.871067 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/29fecf58-02ca-475e-9394-6205a9cdc086-logs" (OuterVolumeSpecName: "logs") pod "29fecf58-02ca-475e-9394-6205a9cdc086" (UID: "29fecf58-02ca-475e-9394-6205a9cdc086"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 10:57:18 crc kubenswrapper[4926]: I1122 10:57:18.871216 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-l2jbl\" (UniqueName: \"kubernetes.io/projected/29fecf58-02ca-475e-9394-6205a9cdc086-kube-api-access-l2jbl\") pod \"29fecf58-02ca-475e-9394-6205a9cdc086\" (UID: \"29fecf58-02ca-475e-9394-6205a9cdc086\") " Nov 22 10:57:18 crc kubenswrapper[4926]: I1122 10:57:18.871252 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/29fecf58-02ca-475e-9394-6205a9cdc086-config-data\") pod \"29fecf58-02ca-475e-9394-6205a9cdc086\" (UID: \"29fecf58-02ca-475e-9394-6205a9cdc086\") " Nov 22 10:57:18 crc kubenswrapper[4926]: I1122 10:57:18.871281 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/29fecf58-02ca-475e-9394-6205a9cdc086-horizon-secret-key\") pod \"29fecf58-02ca-475e-9394-6205a9cdc086\" (UID: \"29fecf58-02ca-475e-9394-6205a9cdc086\") " Nov 22 10:57:18 crc kubenswrapper[4926]: I1122 10:57:18.871696 4926 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/29fecf58-02ca-475e-9394-6205a9cdc086-logs\") on node \"crc\" DevicePath \"\"" Nov 22 10:57:18 crc kubenswrapper[4926]: I1122 10:57:18.872620 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/29fecf58-02ca-475e-9394-6205a9cdc086-scripts" (OuterVolumeSpecName: "scripts") pod "29fecf58-02ca-475e-9394-6205a9cdc086" (UID: "29fecf58-02ca-475e-9394-6205a9cdc086"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:57:18 crc kubenswrapper[4926]: I1122 10:57:18.873280 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/29fecf58-02ca-475e-9394-6205a9cdc086-config-data" (OuterVolumeSpecName: "config-data") pod "29fecf58-02ca-475e-9394-6205a9cdc086" (UID: "29fecf58-02ca-475e-9394-6205a9cdc086"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:57:18 crc kubenswrapper[4926]: I1122 10:57:18.893459 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/29fecf58-02ca-475e-9394-6205a9cdc086-kube-api-access-l2jbl" (OuterVolumeSpecName: "kube-api-access-l2jbl") pod "29fecf58-02ca-475e-9394-6205a9cdc086" (UID: "29fecf58-02ca-475e-9394-6205a9cdc086"). InnerVolumeSpecName "kube-api-access-l2jbl". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:57:18 crc kubenswrapper[4926]: I1122 10:57:18.896337 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/29fecf58-02ca-475e-9394-6205a9cdc086-horizon-secret-key" (OuterVolumeSpecName: "horizon-secret-key") pod "29fecf58-02ca-475e-9394-6205a9cdc086" (UID: "29fecf58-02ca-475e-9394-6205a9cdc086"). InnerVolumeSpecName "horizon-secret-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:57:18 crc kubenswrapper[4926]: I1122 10:57:18.908637 4926 scope.go:117] "RemoveContainer" containerID="49745d297713e283c421b783744db79ab60ec97569cb97791d6c32cb55a69c61" Nov 22 10:57:18 crc kubenswrapper[4926]: I1122 10:57:18.965183 4926 scope.go:117] "RemoveContainer" containerID="4217f3c73f0b6ea12b31b2c0266858b418fc8ad4827db6b5e792eea4890f571c" Nov 22 10:57:18 crc kubenswrapper[4926]: I1122 10:57:18.978124 4926 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/29fecf58-02ca-475e-9394-6205a9cdc086-scripts\") on node \"crc\" DevicePath \"\"" Nov 22 10:57:18 crc kubenswrapper[4926]: I1122 10:57:18.978150 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-l2jbl\" (UniqueName: \"kubernetes.io/projected/29fecf58-02ca-475e-9394-6205a9cdc086-kube-api-access-l2jbl\") on node \"crc\" DevicePath \"\"" Nov 22 10:57:18 crc kubenswrapper[4926]: I1122 10:57:18.978161 4926 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/29fecf58-02ca-475e-9394-6205a9cdc086-config-data\") on node \"crc\" DevicePath \"\"" Nov 22 10:57:18 crc kubenswrapper[4926]: I1122 10:57:18.978170 4926 reconciler_common.go:293] "Volume detached for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/29fecf58-02ca-475e-9394-6205a9cdc086-horizon-secret-key\") on node \"crc\" DevicePath \"\"" Nov 22 10:57:19 crc kubenswrapper[4926]: I1122 10:57:19.025652 4926 scope.go:117] "RemoveContainer" containerID="6113b7fc74d3a90dd151a5da455c660a68b421b636dd0a99fc5c202da42de9bd" Nov 22 10:57:19 crc kubenswrapper[4926]: I1122 10:57:19.071100 4926 scope.go:117] "RemoveContainer" containerID="fe42f298dc9f0227096ee7814c8e92300a986c8b7602efd6144bd8d4b60ece35" Nov 22 10:57:19 crc kubenswrapper[4926]: I1122 10:57:19.174622 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-86dd5d599b-jndzq"] Nov 22 10:57:19 crc kubenswrapper[4926]: I1122 10:57:19.297994 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-67f69cf99d-5jsdr"] Nov 22 10:57:19 crc kubenswrapper[4926]: I1122 10:57:19.382131 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-7fq8n"] Nov 22 10:57:19 crc kubenswrapper[4926]: W1122 10:57:19.382935 4926 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod116d9793_8efe_45b6_8a2c_5f4c990346ad.slice/crio-53fc6a85d2ffdbc5656160cd68775dd280afa79b26d7fa74f4a1b7408173bc2a 
WatchSource:0}: Error finding container 53fc6a85d2ffdbc5656160cd68775dd280afa79b26d7fa74f4a1b7408173bc2a: Status 404 returned error can't find the container with id 53fc6a85d2ffdbc5656160cd68775dd280afa79b26d7fa74f4a1b7408173bc2a Nov 22 10:57:19 crc kubenswrapper[4926]: I1122 10:57:19.480541 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-67f69cf99d-5jsdr" event={"ID":"0566b619-da0e-49ff-b282-3d2bb8ae4fe6","Type":"ContainerStarted","Data":"e688e9a218a7f4ffba3d9e0461073371f898db61a336aa1faad7da5c0a7172c7"} Nov 22 10:57:19 crc kubenswrapper[4926]: I1122 10:57:19.481712 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-659f95cfc-dgn4q" event={"ID":"29fecf58-02ca-475e-9394-6205a9cdc086","Type":"ContainerDied","Data":"23790c520d6a7468a6d6347f38d102544bf78a459f8313c61e16280d975ff347"} Nov 22 10:57:19 crc kubenswrapper[4926]: I1122 10:57:19.481730 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-659f95cfc-dgn4q" Nov 22 10:57:19 crc kubenswrapper[4926]: I1122 10:57:19.488596 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"9ebf564c-b5b8-40b7-8899-fc953a485d4d","Type":"ContainerStarted","Data":"48fa46793c7476a56cef30695a0e515257c60a6f00469b5a1267c6d887037ff8"} Nov 22 10:57:19 crc kubenswrapper[4926]: I1122 10:57:19.498781 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-7fq8n" event={"ID":"116d9793-8efe-45b6-8a2c-5f4c990346ad","Type":"ContainerStarted","Data":"53fc6a85d2ffdbc5656160cd68775dd280afa79b26d7fa74f4a1b7408173bc2a"} Nov 22 10:57:19 crc kubenswrapper[4926]: I1122 10:57:19.500728 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-86dd5d599b-jndzq" event={"ID":"a7c08c13-5c9c-42ac-8fdc-e651c26d97fc","Type":"ContainerStarted","Data":"9054c6f6ab9c48b5de81036c860faac1b8008a2ab549e19d05c5285a97c19c3e"} Nov 22 10:57:19 crc kubenswrapper[4926]: E1122 10:57:19.503181 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cinder-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-cinder-api:current-podified\\\"\"" pod="openstack/cinder-db-sync-fh2gk" podUID="63e63df0-e7ff-46a2-9b1d-60be115851ce" Nov 22 10:57:19 crc kubenswrapper[4926]: I1122 10:57:19.540692 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-659f95cfc-dgn4q"] Nov 22 10:57:19 crc kubenswrapper[4926]: I1122 10:57:19.549442 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/horizon-659f95cfc-dgn4q"] Nov 22 10:57:19 crc kubenswrapper[4926]: I1122 10:57:19.655299 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 22 10:57:19 crc kubenswrapper[4926]: W1122 10:57:19.668850 4926 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod32257092_e014_4913_99e3_a92b522301e2.slice/crio-8df5587a64edbe675818ae6b7dd0a47a2af4402623835fe32163ae7705ed6021 WatchSource:0}: Error finding container 8df5587a64edbe675818ae6b7dd0a47a2af4402623835fe32163ae7705ed6021: Status 404 returned error can't find the container with id 8df5587a64edbe675818ae6b7dd0a47a2af4402623835fe32163ae7705ed6021 Nov 22 10:57:20 crc kubenswrapper[4926]: I1122 10:57:20.518900 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" 
event={"ID":"32257092-e014-4913-99e3-a92b522301e2","Type":"ContainerStarted","Data":"4f890485d458c5bf8bbc0382df133030b3673ee7a83fab546f49103f6b4445fc"} Nov 22 10:57:20 crc kubenswrapper[4926]: I1122 10:57:20.519470 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"32257092-e014-4913-99e3-a92b522301e2","Type":"ContainerStarted","Data":"8df5587a64edbe675818ae6b7dd0a47a2af4402623835fe32163ae7705ed6021"} Nov 22 10:57:20 crc kubenswrapper[4926]: I1122 10:57:20.521274 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-86dd5d599b-jndzq" event={"ID":"a7c08c13-5c9c-42ac-8fdc-e651c26d97fc","Type":"ContainerStarted","Data":"27da816e861e204b2fe28ea582d4c09af9286710f2a7fdc4c0483abf06d5be4b"} Nov 22 10:57:20 crc kubenswrapper[4926]: I1122 10:57:20.521303 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-86dd5d599b-jndzq" event={"ID":"a7c08c13-5c9c-42ac-8fdc-e651c26d97fc","Type":"ContainerStarted","Data":"7c1648be5a1e86b35fd43a5c5d753fbc83a39cce2e3b709f7bfe57d2f16a8ca7"} Nov 22 10:57:20 crc kubenswrapper[4926]: I1122 10:57:20.526385 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-67f69cf99d-5jsdr" event={"ID":"0566b619-da0e-49ff-b282-3d2bb8ae4fe6","Type":"ContainerStarted","Data":"157148725a567def2b2c6e2ae4298b306aa1e25dd9c7f33941bf227f511bdc78"} Nov 22 10:57:20 crc kubenswrapper[4926]: I1122 10:57:20.533648 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"9ebf564c-b5b8-40b7-8899-fc953a485d4d","Type":"ContainerStarted","Data":"a9296219406bba108a07922e83b06df459b0b44a88740e091803705d572be390"} Nov 22 10:57:20 crc kubenswrapper[4926]: I1122 10:57:20.535717 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-7fq8n" event={"ID":"116d9793-8efe-45b6-8a2c-5f4c990346ad","Type":"ContainerStarted","Data":"7ecf40dc56bebd6106020e390ebded6bc6a7510562fa3d0a926841e9514a17d0"} Nov 22 10:57:20 crc kubenswrapper[4926]: I1122 10:57:20.553683 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/horizon-86dd5d599b-jndzq" podStartSLOduration=23.005799844 podStartE2EDuration="23.553661682s" podCreationTimestamp="2025-11-22 10:56:57 +0000 UTC" firstStartedPulling="2025-11-22 10:57:19.182004778 +0000 UTC m=+1059.483610065" lastFinishedPulling="2025-11-22 10:57:19.729866616 +0000 UTC m=+1060.031471903" observedRunningTime="2025-11-22 10:57:20.552054986 +0000 UTC m=+1060.853660273" watchObservedRunningTime="2025-11-22 10:57:20.553661682 +0000 UTC m=+1060.855266969" Nov 22 10:57:20 crc kubenswrapper[4926]: I1122 10:57:20.577101 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 22 10:57:20 crc kubenswrapper[4926]: I1122 10:57:20.580994 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-bootstrap-7fq8n" podStartSLOduration=11.580974112 podStartE2EDuration="11.580974112s" podCreationTimestamp="2025-11-22 10:57:09 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 10:57:20.575671441 +0000 UTC m=+1060.877276728" watchObservedRunningTime="2025-11-22 10:57:20.580974112 +0000 UTC m=+1060.882579399" Nov 22 10:57:20 crc kubenswrapper[4926]: I1122 10:57:20.594546 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="29fecf58-02ca-475e-9394-6205a9cdc086" 
path="/var/lib/kubelet/pods/29fecf58-02ca-475e-9394-6205a9cdc086/volumes" Nov 22 10:57:21 crc kubenswrapper[4926]: I1122 10:57:21.553656 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"32257092-e014-4913-99e3-a92b522301e2","Type":"ContainerStarted","Data":"4ddaf34f404012355ae0eee3d7d70a108e9c3a361e079136abca04e3fe810195"} Nov 22 10:57:21 crc kubenswrapper[4926]: I1122 10:57:21.560987 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"59000497-7fb0-496d-afca-21b04b8d59e4","Type":"ContainerStarted","Data":"994b59d90e33fa06996f4fe64553849145665a7fd4d10b6135643fc9d1d80af7"} Nov 22 10:57:21 crc kubenswrapper[4926]: I1122 10:57:21.561240 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"59000497-7fb0-496d-afca-21b04b8d59e4","Type":"ContainerStarted","Data":"eca2b1377003b3f000c58f3734b9b2830d35ae6899a9a6adfe7d48b1ffc5faca"} Nov 22 10:57:21 crc kubenswrapper[4926]: I1122 10:57:21.573433 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-67f69cf99d-5jsdr" event={"ID":"0566b619-da0e-49ff-b282-3d2bb8ae4fe6","Type":"ContainerStarted","Data":"98fc8abd427d22d1871572b84594162b7ae30e6ea712de1713f1013870e32014"} Nov 22 10:57:21 crc kubenswrapper[4926]: I1122 10:57:21.583345 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-5lxk9" event={"ID":"bee0e7d2-310f-4b01-8f79-83f113613329","Type":"ContainerStarted","Data":"14cb77830667d8b49447cc008aa373e5dd08cbbdab2bb701152d606c6682be4c"} Nov 22 10:57:21 crc kubenswrapper[4926]: I1122 10:57:21.586075 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-internal-api-0" podStartSLOduration=4.586055077 podStartE2EDuration="4.586055077s" podCreationTimestamp="2025-11-22 10:57:17 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 10:57:21.57599829 +0000 UTC m=+1061.877603577" watchObservedRunningTime="2025-11-22 10:57:21.586055077 +0000 UTC m=+1061.887660364" Nov 22 10:57:21 crc kubenswrapper[4926]: I1122 10:57:21.605585 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/horizon-67f69cf99d-5jsdr" podStartSLOduration=23.728557129 podStartE2EDuration="24.605565845s" podCreationTimestamp="2025-11-22 10:56:57 +0000 UTC" firstStartedPulling="2025-11-22 10:57:19.304585551 +0000 UTC m=+1059.606190838" lastFinishedPulling="2025-11-22 10:57:20.181594227 +0000 UTC m=+1060.483199554" observedRunningTime="2025-11-22 10:57:21.601563971 +0000 UTC m=+1061.903169258" watchObservedRunningTime="2025-11-22 10:57:21.605565845 +0000 UTC m=+1061.907171132" Nov 22 10:57:21 crc kubenswrapper[4926]: I1122 10:57:21.624703 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/placement-db-sync-5lxk9" podStartSLOduration=1.949977791 podStartE2EDuration="32.624686802s" podCreationTimestamp="2025-11-22 10:56:49 +0000 UTC" firstStartedPulling="2025-11-22 10:56:50.535256139 +0000 UTC m=+1030.836861426" lastFinishedPulling="2025-11-22 10:57:21.20996515 +0000 UTC m=+1061.511570437" observedRunningTime="2025-11-22 10:57:21.620817691 +0000 UTC m=+1061.922422978" watchObservedRunningTime="2025-11-22 10:57:21.624686802 +0000 UTC m=+1061.926292089" Nov 22 10:57:22 crc kubenswrapper[4926]: I1122 10:57:22.596638 4926 kubelet.go:2453] "SyncLoop 
(PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"59000497-7fb0-496d-afca-21b04b8d59e4","Type":"ContainerStarted","Data":"12f3423e52ab3cffb68485b607504cb63dc198625ad4a3feffed0d22e0439dec"} Nov 22 10:57:22 crc kubenswrapper[4926]: I1122 10:57:22.598383 4926 generic.go:334] "Generic (PLEG): container finished" podID="6fa25590-aff0-48a4-ac01-3671555d7b1a" containerID="652341ca9c543b35f54c8c72a1597769830475881f229f288f558046f98cdc1b" exitCode=0 Nov 22 10:57:22 crc kubenswrapper[4926]: I1122 10:57:22.598472 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-d6xxc" event={"ID":"6fa25590-aff0-48a4-ac01-3671555d7b1a","Type":"ContainerDied","Data":"652341ca9c543b35f54c8c72a1597769830475881f229f288f558046f98cdc1b"} Nov 22 10:57:22 crc kubenswrapper[4926]: I1122 10:57:22.626759 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-external-api-0" podStartSLOduration=5.626739821 podStartE2EDuration="5.626739821s" podCreationTimestamp="2025-11-22 10:57:17 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 10:57:22.619958847 +0000 UTC m=+1062.921564144" watchObservedRunningTime="2025-11-22 10:57:22.626739821 +0000 UTC m=+1062.928345108" Nov 22 10:57:23 crc kubenswrapper[4926]: I1122 10:57:23.615836 4926 generic.go:334] "Generic (PLEG): container finished" podID="bee0e7d2-310f-4b01-8f79-83f113613329" containerID="14cb77830667d8b49447cc008aa373e5dd08cbbdab2bb701152d606c6682be4c" exitCode=0 Nov 22 10:57:23 crc kubenswrapper[4926]: I1122 10:57:23.615911 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-5lxk9" event={"ID":"bee0e7d2-310f-4b01-8f79-83f113613329","Type":"ContainerDied","Data":"14cb77830667d8b49447cc008aa373e5dd08cbbdab2bb701152d606c6682be4c"} Nov 22 10:57:23 crc kubenswrapper[4926]: I1122 10:57:23.623551 4926 generic.go:334] "Generic (PLEG): container finished" podID="116d9793-8efe-45b6-8a2c-5f4c990346ad" containerID="7ecf40dc56bebd6106020e390ebded6bc6a7510562fa3d0a926841e9514a17d0" exitCode=0 Nov 22 10:57:23 crc kubenswrapper[4926]: I1122 10:57:23.624696 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-7fq8n" event={"ID":"116d9793-8efe-45b6-8a2c-5f4c990346ad","Type":"ContainerDied","Data":"7ecf40dc56bebd6106020e390ebded6bc6a7510562fa3d0a926841e9514a17d0"} Nov 22 10:57:26 crc kubenswrapper[4926]: I1122 10:57:26.088608 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-7fq8n" Nov 22 10:57:26 crc kubenswrapper[4926]: I1122 10:57:26.095763 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-sync-5lxk9" Nov 22 10:57:26 crc kubenswrapper[4926]: I1122 10:57:26.102995 4926 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-db-sync-d6xxc" Nov 22 10:57:26 crc kubenswrapper[4926]: I1122 10:57:26.209491 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/116d9793-8efe-45b6-8a2c-5f4c990346ad-config-data\") pod \"116d9793-8efe-45b6-8a2c-5f4c990346ad\" (UID: \"116d9793-8efe-45b6-8a2c-5f4c990346ad\") " Nov 22 10:57:26 crc kubenswrapper[4926]: I1122 10:57:26.209542 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cclgd\" (UniqueName: \"kubernetes.io/projected/bee0e7d2-310f-4b01-8f79-83f113613329-kube-api-access-cclgd\") pod \"bee0e7d2-310f-4b01-8f79-83f113613329\" (UID: \"bee0e7d2-310f-4b01-8f79-83f113613329\") " Nov 22 10:57:26 crc kubenswrapper[4926]: I1122 10:57:26.209626 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/bee0e7d2-310f-4b01-8f79-83f113613329-logs\") pod \"bee0e7d2-310f-4b01-8f79-83f113613329\" (UID: \"bee0e7d2-310f-4b01-8f79-83f113613329\") " Nov 22 10:57:26 crc kubenswrapper[4926]: I1122 10:57:26.209662 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/116d9793-8efe-45b6-8a2c-5f4c990346ad-fernet-keys\") pod \"116d9793-8efe-45b6-8a2c-5f4c990346ad\" (UID: \"116d9793-8efe-45b6-8a2c-5f4c990346ad\") " Nov 22 10:57:26 crc kubenswrapper[4926]: I1122 10:57:26.209695 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/116d9793-8efe-45b6-8a2c-5f4c990346ad-scripts\") pod \"116d9793-8efe-45b6-8a2c-5f4c990346ad\" (UID: \"116d9793-8efe-45b6-8a2c-5f4c990346ad\") " Nov 22 10:57:26 crc kubenswrapper[4926]: I1122 10:57:26.209728 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bee0e7d2-310f-4b01-8f79-83f113613329-combined-ca-bundle\") pod \"bee0e7d2-310f-4b01-8f79-83f113613329\" (UID: \"bee0e7d2-310f-4b01-8f79-83f113613329\") " Nov 22 10:57:26 crc kubenswrapper[4926]: I1122 10:57:26.209751 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/116d9793-8efe-45b6-8a2c-5f4c990346ad-combined-ca-bundle\") pod \"116d9793-8efe-45b6-8a2c-5f4c990346ad\" (UID: \"116d9793-8efe-45b6-8a2c-5f4c990346ad\") " Nov 22 10:57:26 crc kubenswrapper[4926]: I1122 10:57:26.209803 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/bee0e7d2-310f-4b01-8f79-83f113613329-scripts\") pod \"bee0e7d2-310f-4b01-8f79-83f113613329\" (UID: \"bee0e7d2-310f-4b01-8f79-83f113613329\") " Nov 22 10:57:26 crc kubenswrapper[4926]: I1122 10:57:26.209828 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-p6bbn\" (UniqueName: \"kubernetes.io/projected/6fa25590-aff0-48a4-ac01-3671555d7b1a-kube-api-access-p6bbn\") pod \"6fa25590-aff0-48a4-ac01-3671555d7b1a\" (UID: \"6fa25590-aff0-48a4-ac01-3671555d7b1a\") " Nov 22 10:57:26 crc kubenswrapper[4926]: I1122 10:57:26.209862 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6fa25590-aff0-48a4-ac01-3671555d7b1a-combined-ca-bundle\") pod \"6fa25590-aff0-48a4-ac01-3671555d7b1a\" (UID: 
\"6fa25590-aff0-48a4-ac01-3671555d7b1a\") " Nov 22 10:57:26 crc kubenswrapper[4926]: I1122 10:57:26.209937 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/6fa25590-aff0-48a4-ac01-3671555d7b1a-config\") pod \"6fa25590-aff0-48a4-ac01-3671555d7b1a\" (UID: \"6fa25590-aff0-48a4-ac01-3671555d7b1a\") " Nov 22 10:57:26 crc kubenswrapper[4926]: I1122 10:57:26.209964 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zr59j\" (UniqueName: \"kubernetes.io/projected/116d9793-8efe-45b6-8a2c-5f4c990346ad-kube-api-access-zr59j\") pod \"116d9793-8efe-45b6-8a2c-5f4c990346ad\" (UID: \"116d9793-8efe-45b6-8a2c-5f4c990346ad\") " Nov 22 10:57:26 crc kubenswrapper[4926]: I1122 10:57:26.210002 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/116d9793-8efe-45b6-8a2c-5f4c990346ad-credential-keys\") pod \"116d9793-8efe-45b6-8a2c-5f4c990346ad\" (UID: \"116d9793-8efe-45b6-8a2c-5f4c990346ad\") " Nov 22 10:57:26 crc kubenswrapper[4926]: I1122 10:57:26.210034 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bee0e7d2-310f-4b01-8f79-83f113613329-config-data\") pod \"bee0e7d2-310f-4b01-8f79-83f113613329\" (UID: \"bee0e7d2-310f-4b01-8f79-83f113613329\") " Nov 22 10:57:26 crc kubenswrapper[4926]: I1122 10:57:26.210130 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bee0e7d2-310f-4b01-8f79-83f113613329-logs" (OuterVolumeSpecName: "logs") pod "bee0e7d2-310f-4b01-8f79-83f113613329" (UID: "bee0e7d2-310f-4b01-8f79-83f113613329"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 10:57:26 crc kubenswrapper[4926]: I1122 10:57:26.210503 4926 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/bee0e7d2-310f-4b01-8f79-83f113613329-logs\") on node \"crc\" DevicePath \"\"" Nov 22 10:57:26 crc kubenswrapper[4926]: I1122 10:57:26.216148 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/116d9793-8efe-45b6-8a2c-5f4c990346ad-scripts" (OuterVolumeSpecName: "scripts") pod "116d9793-8efe-45b6-8a2c-5f4c990346ad" (UID: "116d9793-8efe-45b6-8a2c-5f4c990346ad"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:57:26 crc kubenswrapper[4926]: I1122 10:57:26.216335 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bee0e7d2-310f-4b01-8f79-83f113613329-scripts" (OuterVolumeSpecName: "scripts") pod "bee0e7d2-310f-4b01-8f79-83f113613329" (UID: "bee0e7d2-310f-4b01-8f79-83f113613329"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:57:26 crc kubenswrapper[4926]: I1122 10:57:26.216333 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/116d9793-8efe-45b6-8a2c-5f4c990346ad-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "116d9793-8efe-45b6-8a2c-5f4c990346ad" (UID: "116d9793-8efe-45b6-8a2c-5f4c990346ad"). InnerVolumeSpecName "fernet-keys". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:57:26 crc kubenswrapper[4926]: I1122 10:57:26.218101 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bee0e7d2-310f-4b01-8f79-83f113613329-kube-api-access-cclgd" (OuterVolumeSpecName: "kube-api-access-cclgd") pod "bee0e7d2-310f-4b01-8f79-83f113613329" (UID: "bee0e7d2-310f-4b01-8f79-83f113613329"). InnerVolumeSpecName "kube-api-access-cclgd". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:57:26 crc kubenswrapper[4926]: I1122 10:57:26.231347 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6fa25590-aff0-48a4-ac01-3671555d7b1a-kube-api-access-p6bbn" (OuterVolumeSpecName: "kube-api-access-p6bbn") pod "6fa25590-aff0-48a4-ac01-3671555d7b1a" (UID: "6fa25590-aff0-48a4-ac01-3671555d7b1a"). InnerVolumeSpecName "kube-api-access-p6bbn". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:57:26 crc kubenswrapper[4926]: I1122 10:57:26.243957 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/116d9793-8efe-45b6-8a2c-5f4c990346ad-credential-keys" (OuterVolumeSpecName: "credential-keys") pod "116d9793-8efe-45b6-8a2c-5f4c990346ad" (UID: "116d9793-8efe-45b6-8a2c-5f4c990346ad"). InnerVolumeSpecName "credential-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:57:26 crc kubenswrapper[4926]: I1122 10:57:26.248139 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bee0e7d2-310f-4b01-8f79-83f113613329-config-data" (OuterVolumeSpecName: "config-data") pod "bee0e7d2-310f-4b01-8f79-83f113613329" (UID: "bee0e7d2-310f-4b01-8f79-83f113613329"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:57:26 crc kubenswrapper[4926]: I1122 10:57:26.248147 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6fa25590-aff0-48a4-ac01-3671555d7b1a-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "6fa25590-aff0-48a4-ac01-3671555d7b1a" (UID: "6fa25590-aff0-48a4-ac01-3671555d7b1a"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:57:26 crc kubenswrapper[4926]: I1122 10:57:26.248283 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/116d9793-8efe-45b6-8a2c-5f4c990346ad-kube-api-access-zr59j" (OuterVolumeSpecName: "kube-api-access-zr59j") pod "116d9793-8efe-45b6-8a2c-5f4c990346ad" (UID: "116d9793-8efe-45b6-8a2c-5f4c990346ad"). InnerVolumeSpecName "kube-api-access-zr59j". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:57:26 crc kubenswrapper[4926]: I1122 10:57:26.263147 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bee0e7d2-310f-4b01-8f79-83f113613329-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "bee0e7d2-310f-4b01-8f79-83f113613329" (UID: "bee0e7d2-310f-4b01-8f79-83f113613329"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:57:26 crc kubenswrapper[4926]: I1122 10:57:26.265242 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/116d9793-8efe-45b6-8a2c-5f4c990346ad-config-data" (OuterVolumeSpecName: "config-data") pod "116d9793-8efe-45b6-8a2c-5f4c990346ad" (UID: "116d9793-8efe-45b6-8a2c-5f4c990346ad"). 
InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:57:26 crc kubenswrapper[4926]: I1122 10:57:26.272943 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6fa25590-aff0-48a4-ac01-3671555d7b1a-config" (OuterVolumeSpecName: "config") pod "6fa25590-aff0-48a4-ac01-3671555d7b1a" (UID: "6fa25590-aff0-48a4-ac01-3671555d7b1a"). InnerVolumeSpecName "config". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:57:26 crc kubenswrapper[4926]: I1122 10:57:26.273467 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/116d9793-8efe-45b6-8a2c-5f4c990346ad-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "116d9793-8efe-45b6-8a2c-5f4c990346ad" (UID: "116d9793-8efe-45b6-8a2c-5f4c990346ad"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:57:26 crc kubenswrapper[4926]: I1122 10:57:26.311502 4926 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6fa25590-aff0-48a4-ac01-3671555d7b1a-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 22 10:57:26 crc kubenswrapper[4926]: I1122 10:57:26.311744 4926 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/secret/6fa25590-aff0-48a4-ac01-3671555d7b1a-config\") on node \"crc\" DevicePath \"\"" Nov 22 10:57:26 crc kubenswrapper[4926]: I1122 10:57:26.311823 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zr59j\" (UniqueName: \"kubernetes.io/projected/116d9793-8efe-45b6-8a2c-5f4c990346ad-kube-api-access-zr59j\") on node \"crc\" DevicePath \"\"" Nov 22 10:57:26 crc kubenswrapper[4926]: I1122 10:57:26.311921 4926 reconciler_common.go:293] "Volume detached for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/116d9793-8efe-45b6-8a2c-5f4c990346ad-credential-keys\") on node \"crc\" DevicePath \"\"" Nov 22 10:57:26 crc kubenswrapper[4926]: I1122 10:57:26.312001 4926 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bee0e7d2-310f-4b01-8f79-83f113613329-config-data\") on node \"crc\" DevicePath \"\"" Nov 22 10:57:26 crc kubenswrapper[4926]: I1122 10:57:26.312062 4926 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/116d9793-8efe-45b6-8a2c-5f4c990346ad-config-data\") on node \"crc\" DevicePath \"\"" Nov 22 10:57:26 crc kubenswrapper[4926]: I1122 10:57:26.312113 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cclgd\" (UniqueName: \"kubernetes.io/projected/bee0e7d2-310f-4b01-8f79-83f113613329-kube-api-access-cclgd\") on node \"crc\" DevicePath \"\"" Nov 22 10:57:26 crc kubenswrapper[4926]: I1122 10:57:26.312160 4926 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/116d9793-8efe-45b6-8a2c-5f4c990346ad-fernet-keys\") on node \"crc\" DevicePath \"\"" Nov 22 10:57:26 crc kubenswrapper[4926]: I1122 10:57:26.312213 4926 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/116d9793-8efe-45b6-8a2c-5f4c990346ad-scripts\") on node \"crc\" DevicePath \"\"" Nov 22 10:57:26 crc kubenswrapper[4926]: I1122 10:57:26.312267 4926 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/bee0e7d2-310f-4b01-8f79-83f113613329-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 22 10:57:26 crc kubenswrapper[4926]: I1122 10:57:26.312323 4926 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/116d9793-8efe-45b6-8a2c-5f4c990346ad-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 22 10:57:26 crc kubenswrapper[4926]: I1122 10:57:26.312497 4926 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/bee0e7d2-310f-4b01-8f79-83f113613329-scripts\") on node \"crc\" DevicePath \"\"" Nov 22 10:57:26 crc kubenswrapper[4926]: I1122 10:57:26.312550 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-p6bbn\" (UniqueName: \"kubernetes.io/projected/6fa25590-aff0-48a4-ac01-3671555d7b1a-kube-api-access-p6bbn\") on node \"crc\" DevicePath \"\"" Nov 22 10:57:26 crc kubenswrapper[4926]: I1122 10:57:26.665348 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-5lxk9" event={"ID":"bee0e7d2-310f-4b01-8f79-83f113613329","Type":"ContainerDied","Data":"6f6f9f835334b62faa4e4038a346528b24dfbe944d01cdba1f346c6d2b9be917"} Nov 22 10:57:26 crc kubenswrapper[4926]: I1122 10:57:26.665715 4926 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="6f6f9f835334b62faa4e4038a346528b24dfbe944d01cdba1f346c6d2b9be917" Nov 22 10:57:26 crc kubenswrapper[4926]: I1122 10:57:26.665784 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-sync-5lxk9" Nov 22 10:57:26 crc kubenswrapper[4926]: I1122 10:57:26.669287 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-d6xxc" event={"ID":"6fa25590-aff0-48a4-ac01-3671555d7b1a","Type":"ContainerDied","Data":"cd06671e38b61d325fc4719c67ed4a6e865e3c3dfe5106ed7f782f212a75828b"} Nov 22 10:57:26 crc kubenswrapper[4926]: I1122 10:57:26.669325 4926 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="cd06671e38b61d325fc4719c67ed4a6e865e3c3dfe5106ed7f782f212a75828b" Nov 22 10:57:26 crc kubenswrapper[4926]: I1122 10:57:26.669581 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-sync-d6xxc" Nov 22 10:57:26 crc kubenswrapper[4926]: I1122 10:57:26.674343 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"9ebf564c-b5b8-40b7-8899-fc953a485d4d","Type":"ContainerStarted","Data":"067015ba91f865ef3fda97dc58b90026ce16d3cee99ee22920cb10888c02a4fc"} Nov 22 10:57:26 crc kubenswrapper[4926]: I1122 10:57:26.677622 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-7fq8n" event={"ID":"116d9793-8efe-45b6-8a2c-5f4c990346ad","Type":"ContainerDied","Data":"53fc6a85d2ffdbc5656160cd68775dd280afa79b26d7fa74f4a1b7408173bc2a"} Nov 22 10:57:26 crc kubenswrapper[4926]: I1122 10:57:26.677662 4926 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="53fc6a85d2ffdbc5656160cd68775dd280afa79b26d7fa74f4a1b7408173bc2a" Nov 22 10:57:26 crc kubenswrapper[4926]: I1122 10:57:26.677750 4926 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bootstrap-7fq8n" Nov 22 10:57:27 crc kubenswrapper[4926]: I1122 10:57:27.229944 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-68769dd845-84s2z"] Nov 22 10:57:27 crc kubenswrapper[4926]: E1122 10:57:27.238583 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="116d9793-8efe-45b6-8a2c-5f4c990346ad" containerName="keystone-bootstrap" Nov 22 10:57:27 crc kubenswrapper[4926]: I1122 10:57:27.238619 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="116d9793-8efe-45b6-8a2c-5f4c990346ad" containerName="keystone-bootstrap" Nov 22 10:57:27 crc kubenswrapper[4926]: E1122 10:57:27.238646 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6fa25590-aff0-48a4-ac01-3671555d7b1a" containerName="neutron-db-sync" Nov 22 10:57:27 crc kubenswrapper[4926]: I1122 10:57:27.238653 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="6fa25590-aff0-48a4-ac01-3671555d7b1a" containerName="neutron-db-sync" Nov 22 10:57:27 crc kubenswrapper[4926]: E1122 10:57:27.238672 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bee0e7d2-310f-4b01-8f79-83f113613329" containerName="placement-db-sync" Nov 22 10:57:27 crc kubenswrapper[4926]: I1122 10:57:27.238682 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="bee0e7d2-310f-4b01-8f79-83f113613329" containerName="placement-db-sync" Nov 22 10:57:27 crc kubenswrapper[4926]: I1122 10:57:27.238921 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="bee0e7d2-310f-4b01-8f79-83f113613329" containerName="placement-db-sync" Nov 22 10:57:27 crc kubenswrapper[4926]: I1122 10:57:27.238948 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="6fa25590-aff0-48a4-ac01-3671555d7b1a" containerName="neutron-db-sync" Nov 22 10:57:27 crc kubenswrapper[4926]: I1122 10:57:27.238957 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="116d9793-8efe-45b6-8a2c-5f4c990346ad" containerName="keystone-bootstrap" Nov 22 10:57:27 crc kubenswrapper[4926]: I1122 10:57:27.239599 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-68769dd845-84s2z" Nov 22 10:57:27 crc kubenswrapper[4926]: I1122 10:57:27.242778 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-keystone-public-svc" Nov 22 10:57:27 crc kubenswrapper[4926]: I1122 10:57:27.242881 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-kngh7" Nov 22 10:57:27 crc kubenswrapper[4926]: I1122 10:57:27.243111 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Nov 22 10:57:27 crc kubenswrapper[4926]: I1122 10:57:27.243254 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-keystone-internal-svc" Nov 22 10:57:27 crc kubenswrapper[4926]: I1122 10:57:27.243470 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Nov 22 10:57:27 crc kubenswrapper[4926]: I1122 10:57:27.245871 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Nov 22 10:57:27 crc kubenswrapper[4926]: I1122 10:57:27.253274 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-68769dd845-84s2z"] Nov 22 10:57:27 crc kubenswrapper[4926]: I1122 10:57:27.351783 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-65b67ff7d-d2fkp"] Nov 22 10:57:27 crc kubenswrapper[4926]: I1122 10:57:27.365394 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-65b67ff7d-d2fkp" Nov 22 10:57:27 crc kubenswrapper[4926]: I1122 10:57:27.378168 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/3c8571ff-d236-4cc6-aebe-ffa8be3ef604-fernet-keys\") pod \"keystone-68769dd845-84s2z\" (UID: \"3c8571ff-d236-4cc6-aebe-ffa8be3ef604\") " pod="openstack/keystone-68769dd845-84s2z" Nov 22 10:57:27 crc kubenswrapper[4926]: I1122 10:57:27.378281 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3c8571ff-d236-4cc6-aebe-ffa8be3ef604-combined-ca-bundle\") pod \"keystone-68769dd845-84s2z\" (UID: \"3c8571ff-d236-4cc6-aebe-ffa8be3ef604\") " pod="openstack/keystone-68769dd845-84s2z" Nov 22 10:57:27 crc kubenswrapper[4926]: I1122 10:57:27.378363 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mgf9x\" (UniqueName: \"kubernetes.io/projected/3c8571ff-d236-4cc6-aebe-ffa8be3ef604-kube-api-access-mgf9x\") pod \"keystone-68769dd845-84s2z\" (UID: \"3c8571ff-d236-4cc6-aebe-ffa8be3ef604\") " pod="openstack/keystone-68769dd845-84s2z" Nov 22 10:57:27 crc kubenswrapper[4926]: I1122 10:57:27.378434 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3c8571ff-d236-4cc6-aebe-ffa8be3ef604-config-data\") pod \"keystone-68769dd845-84s2z\" (UID: \"3c8571ff-d236-4cc6-aebe-ffa8be3ef604\") " pod="openstack/keystone-68769dd845-84s2z" Nov 22 10:57:27 crc kubenswrapper[4926]: I1122 10:57:27.378648 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3c8571ff-d236-4cc6-aebe-ffa8be3ef604-scripts\") pod \"keystone-68769dd845-84s2z\" (UID: \"3c8571ff-d236-4cc6-aebe-ffa8be3ef604\") " 
pod="openstack/keystone-68769dd845-84s2z" Nov 22 10:57:27 crc kubenswrapper[4926]: I1122 10:57:27.378682 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/3c8571ff-d236-4cc6-aebe-ffa8be3ef604-credential-keys\") pod \"keystone-68769dd845-84s2z\" (UID: \"3c8571ff-d236-4cc6-aebe-ffa8be3ef604\") " pod="openstack/keystone-68769dd845-84s2z" Nov 22 10:57:27 crc kubenswrapper[4926]: I1122 10:57:27.378751 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/3c8571ff-d236-4cc6-aebe-ffa8be3ef604-public-tls-certs\") pod \"keystone-68769dd845-84s2z\" (UID: \"3c8571ff-d236-4cc6-aebe-ffa8be3ef604\") " pod="openstack/keystone-68769dd845-84s2z" Nov 22 10:57:27 crc kubenswrapper[4926]: I1122 10:57:27.378816 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/3c8571ff-d236-4cc6-aebe-ffa8be3ef604-internal-tls-certs\") pod \"keystone-68769dd845-84s2z\" (UID: \"3c8571ff-d236-4cc6-aebe-ffa8be3ef604\") " pod="openstack/keystone-68769dd845-84s2z" Nov 22 10:57:27 crc kubenswrapper[4926]: I1122 10:57:27.392306 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-scripts" Nov 22 10:57:27 crc kubenswrapper[4926]: I1122 10:57:27.393049 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-config-data" Nov 22 10:57:27 crc kubenswrapper[4926]: I1122 10:57:27.393107 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-placement-public-svc" Nov 22 10:57:27 crc kubenswrapper[4926]: I1122 10:57:27.393175 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-placement-dockercfg-vjw4c" Nov 22 10:57:27 crc kubenswrapper[4926]: I1122 10:57:27.393307 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-placement-internal-svc" Nov 22 10:57:27 crc kubenswrapper[4926]: I1122 10:57:27.457032 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-65b67ff7d-d2fkp"] Nov 22 10:57:27 crc kubenswrapper[4926]: I1122 10:57:27.480386 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c5fcfb96-741e-467c-971f-762618aa54d5-scripts\") pod \"placement-65b67ff7d-d2fkp\" (UID: \"c5fcfb96-741e-467c-971f-762618aa54d5\") " pod="openstack/placement-65b67ff7d-d2fkp" Nov 22 10:57:27 crc kubenswrapper[4926]: I1122 10:57:27.481056 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c5fcfb96-741e-467c-971f-762618aa54d5-logs\") pod \"placement-65b67ff7d-d2fkp\" (UID: \"c5fcfb96-741e-467c-971f-762618aa54d5\") " pod="openstack/placement-65b67ff7d-d2fkp" Nov 22 10:57:27 crc kubenswrapper[4926]: I1122 10:57:27.481335 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/3c8571ff-d236-4cc6-aebe-ffa8be3ef604-fernet-keys\") pod \"keystone-68769dd845-84s2z\" (UID: \"3c8571ff-d236-4cc6-aebe-ffa8be3ef604\") " pod="openstack/keystone-68769dd845-84s2z" Nov 22 10:57:27 crc kubenswrapper[4926]: I1122 10:57:27.481435 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume 
started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/c5fcfb96-741e-467c-971f-762618aa54d5-public-tls-certs\") pod \"placement-65b67ff7d-d2fkp\" (UID: \"c5fcfb96-741e-467c-971f-762618aa54d5\") " pod="openstack/placement-65b67ff7d-d2fkp" Nov 22 10:57:27 crc kubenswrapper[4926]: I1122 10:57:27.481557 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3c8571ff-d236-4cc6-aebe-ffa8be3ef604-combined-ca-bundle\") pod \"keystone-68769dd845-84s2z\" (UID: \"3c8571ff-d236-4cc6-aebe-ffa8be3ef604\") " pod="openstack/keystone-68769dd845-84s2z" Nov 22 10:57:27 crc kubenswrapper[4926]: I1122 10:57:27.481808 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mgf9x\" (UniqueName: \"kubernetes.io/projected/3c8571ff-d236-4cc6-aebe-ffa8be3ef604-kube-api-access-mgf9x\") pod \"keystone-68769dd845-84s2z\" (UID: \"3c8571ff-d236-4cc6-aebe-ffa8be3ef604\") " pod="openstack/keystone-68769dd845-84s2z" Nov 22 10:57:27 crc kubenswrapper[4926]: I1122 10:57:27.481903 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3c8571ff-d236-4cc6-aebe-ffa8be3ef604-config-data\") pod \"keystone-68769dd845-84s2z\" (UID: \"3c8571ff-d236-4cc6-aebe-ffa8be3ef604\") " pod="openstack/keystone-68769dd845-84s2z" Nov 22 10:57:27 crc kubenswrapper[4926]: I1122 10:57:27.482054 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3c8571ff-d236-4cc6-aebe-ffa8be3ef604-scripts\") pod \"keystone-68769dd845-84s2z\" (UID: \"3c8571ff-d236-4cc6-aebe-ffa8be3ef604\") " pod="openstack/keystone-68769dd845-84s2z" Nov 22 10:57:27 crc kubenswrapper[4926]: I1122 10:57:27.482131 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/3c8571ff-d236-4cc6-aebe-ffa8be3ef604-credential-keys\") pod \"keystone-68769dd845-84s2z\" (UID: \"3c8571ff-d236-4cc6-aebe-ffa8be3ef604\") " pod="openstack/keystone-68769dd845-84s2z" Nov 22 10:57:27 crc kubenswrapper[4926]: I1122 10:57:27.482232 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/3c8571ff-d236-4cc6-aebe-ffa8be3ef604-public-tls-certs\") pod \"keystone-68769dd845-84s2z\" (UID: \"3c8571ff-d236-4cc6-aebe-ffa8be3ef604\") " pod="openstack/keystone-68769dd845-84s2z" Nov 22 10:57:27 crc kubenswrapper[4926]: I1122 10:57:27.482321 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/c5fcfb96-741e-467c-971f-762618aa54d5-internal-tls-certs\") pod \"placement-65b67ff7d-d2fkp\" (UID: \"c5fcfb96-741e-467c-971f-762618aa54d5\") " pod="openstack/placement-65b67ff7d-d2fkp" Nov 22 10:57:27 crc kubenswrapper[4926]: I1122 10:57:27.482406 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/3c8571ff-d236-4cc6-aebe-ffa8be3ef604-internal-tls-certs\") pod \"keystone-68769dd845-84s2z\" (UID: \"3c8571ff-d236-4cc6-aebe-ffa8be3ef604\") " pod="openstack/keystone-68769dd845-84s2z" Nov 22 10:57:27 crc kubenswrapper[4926]: I1122 10:57:27.482502 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2xz4r\" (UniqueName: 
\"kubernetes.io/projected/c5fcfb96-741e-467c-971f-762618aa54d5-kube-api-access-2xz4r\") pod \"placement-65b67ff7d-d2fkp\" (UID: \"c5fcfb96-741e-467c-971f-762618aa54d5\") " pod="openstack/placement-65b67ff7d-d2fkp" Nov 22 10:57:27 crc kubenswrapper[4926]: I1122 10:57:27.482589 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c5fcfb96-741e-467c-971f-762618aa54d5-combined-ca-bundle\") pod \"placement-65b67ff7d-d2fkp\" (UID: \"c5fcfb96-741e-467c-971f-762618aa54d5\") " pod="openstack/placement-65b67ff7d-d2fkp" Nov 22 10:57:27 crc kubenswrapper[4926]: I1122 10:57:27.482683 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c5fcfb96-741e-467c-971f-762618aa54d5-config-data\") pod \"placement-65b67ff7d-d2fkp\" (UID: \"c5fcfb96-741e-467c-971f-762618aa54d5\") " pod="openstack/placement-65b67ff7d-d2fkp" Nov 22 10:57:27 crc kubenswrapper[4926]: I1122 10:57:27.503898 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/3c8571ff-d236-4cc6-aebe-ffa8be3ef604-public-tls-certs\") pod \"keystone-68769dd845-84s2z\" (UID: \"3c8571ff-d236-4cc6-aebe-ffa8be3ef604\") " pod="openstack/keystone-68769dd845-84s2z" Nov 22 10:57:27 crc kubenswrapper[4926]: I1122 10:57:27.505691 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3c8571ff-d236-4cc6-aebe-ffa8be3ef604-config-data\") pod \"keystone-68769dd845-84s2z\" (UID: \"3c8571ff-d236-4cc6-aebe-ffa8be3ef604\") " pod="openstack/keystone-68769dd845-84s2z" Nov 22 10:57:27 crc kubenswrapper[4926]: I1122 10:57:27.506163 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/3c8571ff-d236-4cc6-aebe-ffa8be3ef604-internal-tls-certs\") pod \"keystone-68769dd845-84s2z\" (UID: \"3c8571ff-d236-4cc6-aebe-ffa8be3ef604\") " pod="openstack/keystone-68769dd845-84s2z" Nov 22 10:57:27 crc kubenswrapper[4926]: I1122 10:57:27.506664 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/3c8571ff-d236-4cc6-aebe-ffa8be3ef604-credential-keys\") pod \"keystone-68769dd845-84s2z\" (UID: \"3c8571ff-d236-4cc6-aebe-ffa8be3ef604\") " pod="openstack/keystone-68769dd845-84s2z" Nov 22 10:57:27 crc kubenswrapper[4926]: I1122 10:57:27.503792 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-5ccc5c4795-nkxbv"] Nov 22 10:57:27 crc kubenswrapper[4926]: I1122 10:57:27.507051 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/3c8571ff-d236-4cc6-aebe-ffa8be3ef604-fernet-keys\") pod \"keystone-68769dd845-84s2z\" (UID: \"3c8571ff-d236-4cc6-aebe-ffa8be3ef604\") " pod="openstack/keystone-68769dd845-84s2z" Nov 22 10:57:27 crc kubenswrapper[4926]: I1122 10:57:27.508548 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5ccc5c4795-nkxbv" Nov 22 10:57:27 crc kubenswrapper[4926]: I1122 10:57:27.522637 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3c8571ff-d236-4cc6-aebe-ffa8be3ef604-scripts\") pod \"keystone-68769dd845-84s2z\" (UID: \"3c8571ff-d236-4cc6-aebe-ffa8be3ef604\") " pod="openstack/keystone-68769dd845-84s2z" Nov 22 10:57:27 crc kubenswrapper[4926]: I1122 10:57:27.522645 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3c8571ff-d236-4cc6-aebe-ffa8be3ef604-combined-ca-bundle\") pod \"keystone-68769dd845-84s2z\" (UID: \"3c8571ff-d236-4cc6-aebe-ffa8be3ef604\") " pod="openstack/keystone-68769dd845-84s2z" Nov 22 10:57:27 crc kubenswrapper[4926]: I1122 10:57:27.522816 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mgf9x\" (UniqueName: \"kubernetes.io/projected/3c8571ff-d236-4cc6-aebe-ffa8be3ef604-kube-api-access-mgf9x\") pod \"keystone-68769dd845-84s2z\" (UID: \"3c8571ff-d236-4cc6-aebe-ffa8be3ef604\") " pod="openstack/keystone-68769dd845-84s2z" Nov 22 10:57:27 crc kubenswrapper[4926]: I1122 10:57:27.564742 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-68769dd845-84s2z" Nov 22 10:57:27 crc kubenswrapper[4926]: I1122 10:57:27.571526 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5ccc5c4795-nkxbv"] Nov 22 10:57:27 crc kubenswrapper[4926]: I1122 10:57:27.584676 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c5fcfb96-741e-467c-971f-762618aa54d5-logs\") pod \"placement-65b67ff7d-d2fkp\" (UID: \"c5fcfb96-741e-467c-971f-762618aa54d5\") " pod="openstack/placement-65b67ff7d-d2fkp" Nov 22 10:57:27 crc kubenswrapper[4926]: I1122 10:57:27.584719 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/c5fcfb96-741e-467c-971f-762618aa54d5-public-tls-certs\") pod \"placement-65b67ff7d-d2fkp\" (UID: \"c5fcfb96-741e-467c-971f-762618aa54d5\") " pod="openstack/placement-65b67ff7d-d2fkp" Nov 22 10:57:27 crc kubenswrapper[4926]: I1122 10:57:27.584756 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/a2f49243-dd53-46b6-9e36-bde6fe8d4e1d-ovsdbserver-sb\") pod \"dnsmasq-dns-5ccc5c4795-nkxbv\" (UID: \"a2f49243-dd53-46b6-9e36-bde6fe8d4e1d\") " pod="openstack/dnsmasq-dns-5ccc5c4795-nkxbv" Nov 22 10:57:27 crc kubenswrapper[4926]: I1122 10:57:27.584794 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jsx2t\" (UniqueName: \"kubernetes.io/projected/a2f49243-dd53-46b6-9e36-bde6fe8d4e1d-kube-api-access-jsx2t\") pod \"dnsmasq-dns-5ccc5c4795-nkxbv\" (UID: \"a2f49243-dd53-46b6-9e36-bde6fe8d4e1d\") " pod="openstack/dnsmasq-dns-5ccc5c4795-nkxbv" Nov 22 10:57:27 crc kubenswrapper[4926]: I1122 10:57:27.584847 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/a2f49243-dd53-46b6-9e36-bde6fe8d4e1d-dns-svc\") pod \"dnsmasq-dns-5ccc5c4795-nkxbv\" (UID: \"a2f49243-dd53-46b6-9e36-bde6fe8d4e1d\") " pod="openstack/dnsmasq-dns-5ccc5c4795-nkxbv" Nov 22 10:57:27 crc kubenswrapper[4926]: 
I1122 10:57:27.584862 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a2f49243-dd53-46b6-9e36-bde6fe8d4e1d-config\") pod \"dnsmasq-dns-5ccc5c4795-nkxbv\" (UID: \"a2f49243-dd53-46b6-9e36-bde6fe8d4e1d\") " pod="openstack/dnsmasq-dns-5ccc5c4795-nkxbv" Nov 22 10:57:27 crc kubenswrapper[4926]: I1122 10:57:27.584877 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/a2f49243-dd53-46b6-9e36-bde6fe8d4e1d-ovsdbserver-nb\") pod \"dnsmasq-dns-5ccc5c4795-nkxbv\" (UID: \"a2f49243-dd53-46b6-9e36-bde6fe8d4e1d\") " pod="openstack/dnsmasq-dns-5ccc5c4795-nkxbv" Nov 22 10:57:27 crc kubenswrapper[4926]: I1122 10:57:27.584902 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/c5fcfb96-741e-467c-971f-762618aa54d5-internal-tls-certs\") pod \"placement-65b67ff7d-d2fkp\" (UID: \"c5fcfb96-741e-467c-971f-762618aa54d5\") " pod="openstack/placement-65b67ff7d-d2fkp" Nov 22 10:57:27 crc kubenswrapper[4926]: I1122 10:57:27.584939 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/a2f49243-dd53-46b6-9e36-bde6fe8d4e1d-dns-swift-storage-0\") pod \"dnsmasq-dns-5ccc5c4795-nkxbv\" (UID: \"a2f49243-dd53-46b6-9e36-bde6fe8d4e1d\") " pod="openstack/dnsmasq-dns-5ccc5c4795-nkxbv" Nov 22 10:57:27 crc kubenswrapper[4926]: I1122 10:57:27.584965 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2xz4r\" (UniqueName: \"kubernetes.io/projected/c5fcfb96-741e-467c-971f-762618aa54d5-kube-api-access-2xz4r\") pod \"placement-65b67ff7d-d2fkp\" (UID: \"c5fcfb96-741e-467c-971f-762618aa54d5\") " pod="openstack/placement-65b67ff7d-d2fkp" Nov 22 10:57:27 crc kubenswrapper[4926]: I1122 10:57:27.584984 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c5fcfb96-741e-467c-971f-762618aa54d5-combined-ca-bundle\") pod \"placement-65b67ff7d-d2fkp\" (UID: \"c5fcfb96-741e-467c-971f-762618aa54d5\") " pod="openstack/placement-65b67ff7d-d2fkp" Nov 22 10:57:27 crc kubenswrapper[4926]: I1122 10:57:27.585001 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c5fcfb96-741e-467c-971f-762618aa54d5-config-data\") pod \"placement-65b67ff7d-d2fkp\" (UID: \"c5fcfb96-741e-467c-971f-762618aa54d5\") " pod="openstack/placement-65b67ff7d-d2fkp" Nov 22 10:57:27 crc kubenswrapper[4926]: I1122 10:57:27.585016 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c5fcfb96-741e-467c-971f-762618aa54d5-scripts\") pod \"placement-65b67ff7d-d2fkp\" (UID: \"c5fcfb96-741e-467c-971f-762618aa54d5\") " pod="openstack/placement-65b67ff7d-d2fkp" Nov 22 10:57:27 crc kubenswrapper[4926]: I1122 10:57:27.585775 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c5fcfb96-741e-467c-971f-762618aa54d5-logs\") pod \"placement-65b67ff7d-d2fkp\" (UID: \"c5fcfb96-741e-467c-971f-762618aa54d5\") " pod="openstack/placement-65b67ff7d-d2fkp" Nov 22 10:57:27 crc kubenswrapper[4926]: I1122 10:57:27.591266 4926 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/c5fcfb96-741e-467c-971f-762618aa54d5-public-tls-certs\") pod \"placement-65b67ff7d-d2fkp\" (UID: \"c5fcfb96-741e-467c-971f-762618aa54d5\") " pod="openstack/placement-65b67ff7d-d2fkp" Nov 22 10:57:27 crc kubenswrapper[4926]: I1122 10:57:27.593307 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-64c985686b-kqzqh"] Nov 22 10:57:27 crc kubenswrapper[4926]: I1122 10:57:27.596620 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-64c985686b-kqzqh" Nov 22 10:57:27 crc kubenswrapper[4926]: I1122 10:57:27.599900 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/c5fcfb96-741e-467c-971f-762618aa54d5-internal-tls-certs\") pod \"placement-65b67ff7d-d2fkp\" (UID: \"c5fcfb96-741e-467c-971f-762618aa54d5\") " pod="openstack/placement-65b67ff7d-d2fkp" Nov 22 10:57:27 crc kubenswrapper[4926]: I1122 10:57:27.600989 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c5fcfb96-741e-467c-971f-762618aa54d5-config-data\") pod \"placement-65b67ff7d-d2fkp\" (UID: \"c5fcfb96-741e-467c-971f-762618aa54d5\") " pod="openstack/placement-65b67ff7d-d2fkp" Nov 22 10:57:27 crc kubenswrapper[4926]: I1122 10:57:27.603490 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-httpd-config" Nov 22 10:57:27 crc kubenswrapper[4926]: I1122 10:57:27.603733 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-config" Nov 22 10:57:27 crc kubenswrapper[4926]: I1122 10:57:27.603778 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c5fcfb96-741e-467c-971f-762618aa54d5-scripts\") pod \"placement-65b67ff7d-d2fkp\" (UID: \"c5fcfb96-741e-467c-971f-762618aa54d5\") " pod="openstack/placement-65b67ff7d-d2fkp" Nov 22 10:57:27 crc kubenswrapper[4926]: I1122 10:57:27.603843 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-neutron-dockercfg-vns5p" Nov 22 10:57:27 crc kubenswrapper[4926]: I1122 10:57:27.604182 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-neutron-ovndbs" Nov 22 10:57:27 crc kubenswrapper[4926]: I1122 10:57:27.604762 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c5fcfb96-741e-467c-971f-762618aa54d5-combined-ca-bundle\") pod \"placement-65b67ff7d-d2fkp\" (UID: \"c5fcfb96-741e-467c-971f-762618aa54d5\") " pod="openstack/placement-65b67ff7d-d2fkp" Nov 22 10:57:27 crc kubenswrapper[4926]: I1122 10:57:27.611337 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-64c985686b-kqzqh"] Nov 22 10:57:27 crc kubenswrapper[4926]: I1122 10:57:27.616989 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2xz4r\" (UniqueName: \"kubernetes.io/projected/c5fcfb96-741e-467c-971f-762618aa54d5-kube-api-access-2xz4r\") pod \"placement-65b67ff7d-d2fkp\" (UID: \"c5fcfb96-741e-467c-971f-762618aa54d5\") " pod="openstack/placement-65b67ff7d-d2fkp" Nov 22 10:57:27 crc kubenswrapper[4926]: I1122 10:57:27.686143 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovndb-tls-certs\" (UniqueName: 
\"kubernetes.io/secret/dc8c34a7-9a8c-4ae9-afb4-eebee493f602-ovndb-tls-certs\") pod \"neutron-64c985686b-kqzqh\" (UID: \"dc8c34a7-9a8c-4ae9-afb4-eebee493f602\") " pod="openstack/neutron-64c985686b-kqzqh" Nov 22 10:57:27 crc kubenswrapper[4926]: I1122 10:57:27.686182 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5drgf\" (UniqueName: \"kubernetes.io/projected/dc8c34a7-9a8c-4ae9-afb4-eebee493f602-kube-api-access-5drgf\") pod \"neutron-64c985686b-kqzqh\" (UID: \"dc8c34a7-9a8c-4ae9-afb4-eebee493f602\") " pod="openstack/neutron-64c985686b-kqzqh" Nov 22 10:57:27 crc kubenswrapper[4926]: I1122 10:57:27.687411 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a2f49243-dd53-46b6-9e36-bde6fe8d4e1d-config\") pod \"dnsmasq-dns-5ccc5c4795-nkxbv\" (UID: \"a2f49243-dd53-46b6-9e36-bde6fe8d4e1d\") " pod="openstack/dnsmasq-dns-5ccc5c4795-nkxbv" Nov 22 10:57:27 crc kubenswrapper[4926]: I1122 10:57:27.687440 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/a2f49243-dd53-46b6-9e36-bde6fe8d4e1d-dns-svc\") pod \"dnsmasq-dns-5ccc5c4795-nkxbv\" (UID: \"a2f49243-dd53-46b6-9e36-bde6fe8d4e1d\") " pod="openstack/dnsmasq-dns-5ccc5c4795-nkxbv" Nov 22 10:57:27 crc kubenswrapper[4926]: I1122 10:57:27.687456 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/a2f49243-dd53-46b6-9e36-bde6fe8d4e1d-ovsdbserver-nb\") pod \"dnsmasq-dns-5ccc5c4795-nkxbv\" (UID: \"a2f49243-dd53-46b6-9e36-bde6fe8d4e1d\") " pod="openstack/dnsmasq-dns-5ccc5c4795-nkxbv" Nov 22 10:57:27 crc kubenswrapper[4926]: I1122 10:57:27.687488 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/a2f49243-dd53-46b6-9e36-bde6fe8d4e1d-dns-swift-storage-0\") pod \"dnsmasq-dns-5ccc5c4795-nkxbv\" (UID: \"a2f49243-dd53-46b6-9e36-bde6fe8d4e1d\") " pod="openstack/dnsmasq-dns-5ccc5c4795-nkxbv" Nov 22 10:57:27 crc kubenswrapper[4926]: I1122 10:57:27.687556 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/dc8c34a7-9a8c-4ae9-afb4-eebee493f602-httpd-config\") pod \"neutron-64c985686b-kqzqh\" (UID: \"dc8c34a7-9a8c-4ae9-afb4-eebee493f602\") " pod="openstack/neutron-64c985686b-kqzqh" Nov 22 10:57:27 crc kubenswrapper[4926]: I1122 10:57:27.687575 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/a2f49243-dd53-46b6-9e36-bde6fe8d4e1d-ovsdbserver-sb\") pod \"dnsmasq-dns-5ccc5c4795-nkxbv\" (UID: \"a2f49243-dd53-46b6-9e36-bde6fe8d4e1d\") " pod="openstack/dnsmasq-dns-5ccc5c4795-nkxbv" Nov 22 10:57:27 crc kubenswrapper[4926]: I1122 10:57:27.687592 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/dc8c34a7-9a8c-4ae9-afb4-eebee493f602-config\") pod \"neutron-64c985686b-kqzqh\" (UID: \"dc8c34a7-9a8c-4ae9-afb4-eebee493f602\") " pod="openstack/neutron-64c985686b-kqzqh" Nov 22 10:57:27 crc kubenswrapper[4926]: I1122 10:57:27.687617 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/dc8c34a7-9a8c-4ae9-afb4-eebee493f602-combined-ca-bundle\") pod \"neutron-64c985686b-kqzqh\" (UID: \"dc8c34a7-9a8c-4ae9-afb4-eebee493f602\") " pod="openstack/neutron-64c985686b-kqzqh" Nov 22 10:57:27 crc kubenswrapper[4926]: I1122 10:57:27.687686 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jsx2t\" (UniqueName: \"kubernetes.io/projected/a2f49243-dd53-46b6-9e36-bde6fe8d4e1d-kube-api-access-jsx2t\") pod \"dnsmasq-dns-5ccc5c4795-nkxbv\" (UID: \"a2f49243-dd53-46b6-9e36-bde6fe8d4e1d\") " pod="openstack/dnsmasq-dns-5ccc5c4795-nkxbv" Nov 22 10:57:27 crc kubenswrapper[4926]: I1122 10:57:27.688273 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a2f49243-dd53-46b6-9e36-bde6fe8d4e1d-config\") pod \"dnsmasq-dns-5ccc5c4795-nkxbv\" (UID: \"a2f49243-dd53-46b6-9e36-bde6fe8d4e1d\") " pod="openstack/dnsmasq-dns-5ccc5c4795-nkxbv" Nov 22 10:57:27 crc kubenswrapper[4926]: I1122 10:57:27.689040 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/a2f49243-dd53-46b6-9e36-bde6fe8d4e1d-dns-svc\") pod \"dnsmasq-dns-5ccc5c4795-nkxbv\" (UID: \"a2f49243-dd53-46b6-9e36-bde6fe8d4e1d\") " pod="openstack/dnsmasq-dns-5ccc5c4795-nkxbv" Nov 22 10:57:27 crc kubenswrapper[4926]: I1122 10:57:27.689567 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/a2f49243-dd53-46b6-9e36-bde6fe8d4e1d-ovsdbserver-sb\") pod \"dnsmasq-dns-5ccc5c4795-nkxbv\" (UID: \"a2f49243-dd53-46b6-9e36-bde6fe8d4e1d\") " pod="openstack/dnsmasq-dns-5ccc5c4795-nkxbv" Nov 22 10:57:27 crc kubenswrapper[4926]: I1122 10:57:27.689824 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/a2f49243-dd53-46b6-9e36-bde6fe8d4e1d-ovsdbserver-nb\") pod \"dnsmasq-dns-5ccc5c4795-nkxbv\" (UID: \"a2f49243-dd53-46b6-9e36-bde6fe8d4e1d\") " pod="openstack/dnsmasq-dns-5ccc5c4795-nkxbv" Nov 22 10:57:27 crc kubenswrapper[4926]: I1122 10:57:27.690535 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/a2f49243-dd53-46b6-9e36-bde6fe8d4e1d-dns-swift-storage-0\") pod \"dnsmasq-dns-5ccc5c4795-nkxbv\" (UID: \"a2f49243-dd53-46b6-9e36-bde6fe8d4e1d\") " pod="openstack/dnsmasq-dns-5ccc5c4795-nkxbv" Nov 22 10:57:27 crc kubenswrapper[4926]: I1122 10:57:27.707920 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jsx2t\" (UniqueName: \"kubernetes.io/projected/a2f49243-dd53-46b6-9e36-bde6fe8d4e1d-kube-api-access-jsx2t\") pod \"dnsmasq-dns-5ccc5c4795-nkxbv\" (UID: \"a2f49243-dd53-46b6-9e36-bde6fe8d4e1d\") " pod="openstack/dnsmasq-dns-5ccc5c4795-nkxbv" Nov 22 10:57:27 crc kubenswrapper[4926]: I1122 10:57:27.710595 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5ccc5c4795-nkxbv" Nov 22 10:57:27 crc kubenswrapper[4926]: I1122 10:57:27.738500 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-65b67ff7d-d2fkp" Nov 22 10:57:27 crc kubenswrapper[4926]: I1122 10:57:27.789685 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/dc8c34a7-9a8c-4ae9-afb4-eebee493f602-ovndb-tls-certs\") pod \"neutron-64c985686b-kqzqh\" (UID: \"dc8c34a7-9a8c-4ae9-afb4-eebee493f602\") " pod="openstack/neutron-64c985686b-kqzqh" Nov 22 10:57:27 crc kubenswrapper[4926]: I1122 10:57:27.790028 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5drgf\" (UniqueName: \"kubernetes.io/projected/dc8c34a7-9a8c-4ae9-afb4-eebee493f602-kube-api-access-5drgf\") pod \"neutron-64c985686b-kqzqh\" (UID: \"dc8c34a7-9a8c-4ae9-afb4-eebee493f602\") " pod="openstack/neutron-64c985686b-kqzqh" Nov 22 10:57:27 crc kubenswrapper[4926]: I1122 10:57:27.790321 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/dc8c34a7-9a8c-4ae9-afb4-eebee493f602-httpd-config\") pod \"neutron-64c985686b-kqzqh\" (UID: \"dc8c34a7-9a8c-4ae9-afb4-eebee493f602\") " pod="openstack/neutron-64c985686b-kqzqh" Nov 22 10:57:27 crc kubenswrapper[4926]: I1122 10:57:27.790368 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/dc8c34a7-9a8c-4ae9-afb4-eebee493f602-config\") pod \"neutron-64c985686b-kqzqh\" (UID: \"dc8c34a7-9a8c-4ae9-afb4-eebee493f602\") " pod="openstack/neutron-64c985686b-kqzqh" Nov 22 10:57:27 crc kubenswrapper[4926]: I1122 10:57:27.790420 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dc8c34a7-9a8c-4ae9-afb4-eebee493f602-combined-ca-bundle\") pod \"neutron-64c985686b-kqzqh\" (UID: \"dc8c34a7-9a8c-4ae9-afb4-eebee493f602\") " pod="openstack/neutron-64c985686b-kqzqh" Nov 22 10:57:27 crc kubenswrapper[4926]: I1122 10:57:27.796743 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/dc8c34a7-9a8c-4ae9-afb4-eebee493f602-ovndb-tls-certs\") pod \"neutron-64c985686b-kqzqh\" (UID: \"dc8c34a7-9a8c-4ae9-afb4-eebee493f602\") " pod="openstack/neutron-64c985686b-kqzqh" Nov 22 10:57:27 crc kubenswrapper[4926]: I1122 10:57:27.801061 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/dc8c34a7-9a8c-4ae9-afb4-eebee493f602-httpd-config\") pod \"neutron-64c985686b-kqzqh\" (UID: \"dc8c34a7-9a8c-4ae9-afb4-eebee493f602\") " pod="openstack/neutron-64c985686b-kqzqh" Nov 22 10:57:27 crc kubenswrapper[4926]: I1122 10:57:27.803771 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/dc8c34a7-9a8c-4ae9-afb4-eebee493f602-config\") pod \"neutron-64c985686b-kqzqh\" (UID: \"dc8c34a7-9a8c-4ae9-afb4-eebee493f602\") " pod="openstack/neutron-64c985686b-kqzqh" Nov 22 10:57:27 crc kubenswrapper[4926]: I1122 10:57:27.805377 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dc8c34a7-9a8c-4ae9-afb4-eebee493f602-combined-ca-bundle\") pod \"neutron-64c985686b-kqzqh\" (UID: \"dc8c34a7-9a8c-4ae9-afb4-eebee493f602\") " pod="openstack/neutron-64c985686b-kqzqh" Nov 22 10:57:27 crc kubenswrapper[4926]: I1122 10:57:27.809348 4926 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-5drgf\" (UniqueName: \"kubernetes.io/projected/dc8c34a7-9a8c-4ae9-afb4-eebee493f602-kube-api-access-5drgf\") pod \"neutron-64c985686b-kqzqh\" (UID: \"dc8c34a7-9a8c-4ae9-afb4-eebee493f602\") " pod="openstack/neutron-64c985686b-kqzqh" Nov 22 10:57:27 crc kubenswrapper[4926]: I1122 10:57:27.975558 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-68769dd845-84s2z"] Nov 22 10:57:28 crc kubenswrapper[4926]: I1122 10:57:28.035334 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-64c985686b-kqzqh" Nov 22 10:57:28 crc kubenswrapper[4926]: I1122 10:57:28.074753 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/horizon-67f69cf99d-5jsdr" Nov 22 10:57:28 crc kubenswrapper[4926]: I1122 10:57:28.074841 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/horizon-67f69cf99d-5jsdr" Nov 22 10:57:28 crc kubenswrapper[4926]: I1122 10:57:28.206341 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0" Nov 22 10:57:28 crc kubenswrapper[4926]: I1122 10:57:28.206397 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0" Nov 22 10:57:28 crc kubenswrapper[4926]: I1122 10:57:28.219225 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0" Nov 22 10:57:28 crc kubenswrapper[4926]: I1122 10:57:28.219284 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/horizon-86dd5d599b-jndzq" Nov 22 10:57:28 crc kubenswrapper[4926]: I1122 10:57:28.219298 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0" Nov 22 10:57:28 crc kubenswrapper[4926]: I1122 10:57:28.219307 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/horizon-86dd5d599b-jndzq" Nov 22 10:57:28 crc kubenswrapper[4926]: I1122 10:57:28.263190 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0" Nov 22 10:57:28 crc kubenswrapper[4926]: I1122 10:57:28.270299 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0" Nov 22 10:57:28 crc kubenswrapper[4926]: I1122 10:57:28.329545 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-65b67ff7d-d2fkp"] Nov 22 10:57:28 crc kubenswrapper[4926]: I1122 10:57:28.335188 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0" Nov 22 10:57:28 crc kubenswrapper[4926]: I1122 10:57:28.339676 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0" Nov 22 10:57:28 crc kubenswrapper[4926]: W1122 10:57:28.339766 4926 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc5fcfb96_741e_467c_971f_762618aa54d5.slice/crio-9df81fc71c5ef6586ff65e990ccc9036254623ae49c2409afd4e0411311260a2 WatchSource:0}: Error finding container 9df81fc71c5ef6586ff65e990ccc9036254623ae49c2409afd4e0411311260a2: Status 404 returned error can't find the container with id 9df81fc71c5ef6586ff65e990ccc9036254623ae49c2409afd4e0411311260a2 Nov 22 10:57:28 crc kubenswrapper[4926]: I1122 
10:57:28.504965 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5ccc5c4795-nkxbv"] Nov 22 10:57:28 crc kubenswrapper[4926]: I1122 10:57:28.736379 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-65b67ff7d-d2fkp" event={"ID":"c5fcfb96-741e-467c-971f-762618aa54d5","Type":"ContainerStarted","Data":"9df81fc71c5ef6586ff65e990ccc9036254623ae49c2409afd4e0411311260a2"} Nov 22 10:57:28 crc kubenswrapper[4926]: I1122 10:57:28.747826 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-68769dd845-84s2z" event={"ID":"3c8571ff-d236-4cc6-aebe-ffa8be3ef604","Type":"ContainerStarted","Data":"dbcb6cd61813c433f3355093dc3ef7f2cc921b3c0e9cb4b9fc3af906184781fc"} Nov 22 10:57:28 crc kubenswrapper[4926]: I1122 10:57:28.747875 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-68769dd845-84s2z" event={"ID":"3c8571ff-d236-4cc6-aebe-ffa8be3ef604","Type":"ContainerStarted","Data":"ccd61f6adad51715c7d7797fb592fd6d3e841fcd7ee9bfdabfa6938170f3f849"} Nov 22 10:57:28 crc kubenswrapper[4926]: I1122 10:57:28.768092 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5ccc5c4795-nkxbv" event={"ID":"a2f49243-dd53-46b6-9e36-bde6fe8d4e1d","Type":"ContainerStarted","Data":"689e1796bb8d4eb06ae4606d099a432308d7571b4a10fa5a8da626f09255b6b9"} Nov 22 10:57:28 crc kubenswrapper[4926]: I1122 10:57:28.768711 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0" Nov 22 10:57:28 crc kubenswrapper[4926]: I1122 10:57:28.770935 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Nov 22 10:57:28 crc kubenswrapper[4926]: I1122 10:57:28.770966 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Nov 22 10:57:28 crc kubenswrapper[4926]: I1122 10:57:28.770980 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0" Nov 22 10:57:28 crc kubenswrapper[4926]: I1122 10:57:28.771486 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-68769dd845-84s2z" podStartSLOduration=1.771474624 podStartE2EDuration="1.771474624s" podCreationTimestamp="2025-11-22 10:57:27 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 10:57:28.766228914 +0000 UTC m=+1069.067834201" watchObservedRunningTime="2025-11-22 10:57:28.771474624 +0000 UTC m=+1069.073079911" Nov 22 10:57:28 crc kubenswrapper[4926]: I1122 10:57:28.936924 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-64c985686b-kqzqh"] Nov 22 10:57:29 crc kubenswrapper[4926]: I1122 10:57:29.605584 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-5f7c4dcf85-jl8kd"] Nov 22 10:57:29 crc kubenswrapper[4926]: I1122 10:57:29.607260 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-5f7c4dcf85-jl8kd" Nov 22 10:57:29 crc kubenswrapper[4926]: I1122 10:57:29.610376 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-neutron-public-svc" Nov 22 10:57:29 crc kubenswrapper[4926]: I1122 10:57:29.611363 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-neutron-internal-svc" Nov 22 10:57:29 crc kubenswrapper[4926]: I1122 10:57:29.631973 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-5f7c4dcf85-jl8kd"] Nov 22 10:57:29 crc kubenswrapper[4926]: I1122 10:57:29.666095 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/3260200f-bc21-4521-9a62-2f67ab26f0df-ovndb-tls-certs\") pod \"neutron-5f7c4dcf85-jl8kd\" (UID: \"3260200f-bc21-4521-9a62-2f67ab26f0df\") " pod="openstack/neutron-5f7c4dcf85-jl8kd" Nov 22 10:57:29 crc kubenswrapper[4926]: I1122 10:57:29.666167 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/3260200f-bc21-4521-9a62-2f67ab26f0df-httpd-config\") pod \"neutron-5f7c4dcf85-jl8kd\" (UID: \"3260200f-bc21-4521-9a62-2f67ab26f0df\") " pod="openstack/neutron-5f7c4dcf85-jl8kd" Nov 22 10:57:29 crc kubenswrapper[4926]: I1122 10:57:29.666233 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/3260200f-bc21-4521-9a62-2f67ab26f0df-config\") pod \"neutron-5f7c4dcf85-jl8kd\" (UID: \"3260200f-bc21-4521-9a62-2f67ab26f0df\") " pod="openstack/neutron-5f7c4dcf85-jl8kd" Nov 22 10:57:29 crc kubenswrapper[4926]: I1122 10:57:29.666256 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/3260200f-bc21-4521-9a62-2f67ab26f0df-internal-tls-certs\") pod \"neutron-5f7c4dcf85-jl8kd\" (UID: \"3260200f-bc21-4521-9a62-2f67ab26f0df\") " pod="openstack/neutron-5f7c4dcf85-jl8kd" Nov 22 10:57:29 crc kubenswrapper[4926]: I1122 10:57:29.666315 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hzqpg\" (UniqueName: \"kubernetes.io/projected/3260200f-bc21-4521-9a62-2f67ab26f0df-kube-api-access-hzqpg\") pod \"neutron-5f7c4dcf85-jl8kd\" (UID: \"3260200f-bc21-4521-9a62-2f67ab26f0df\") " pod="openstack/neutron-5f7c4dcf85-jl8kd" Nov 22 10:57:29 crc kubenswrapper[4926]: I1122 10:57:29.666384 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/3260200f-bc21-4521-9a62-2f67ab26f0df-public-tls-certs\") pod \"neutron-5f7c4dcf85-jl8kd\" (UID: \"3260200f-bc21-4521-9a62-2f67ab26f0df\") " pod="openstack/neutron-5f7c4dcf85-jl8kd" Nov 22 10:57:29 crc kubenswrapper[4926]: I1122 10:57:29.666426 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3260200f-bc21-4521-9a62-2f67ab26f0df-combined-ca-bundle\") pod \"neutron-5f7c4dcf85-jl8kd\" (UID: \"3260200f-bc21-4521-9a62-2f67ab26f0df\") " pod="openstack/neutron-5f7c4dcf85-jl8kd" Nov 22 10:57:29 crc kubenswrapper[4926]: I1122 10:57:29.768710 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hzqpg\" (UniqueName: 
\"kubernetes.io/projected/3260200f-bc21-4521-9a62-2f67ab26f0df-kube-api-access-hzqpg\") pod \"neutron-5f7c4dcf85-jl8kd\" (UID: \"3260200f-bc21-4521-9a62-2f67ab26f0df\") " pod="openstack/neutron-5f7c4dcf85-jl8kd" Nov 22 10:57:29 crc kubenswrapper[4926]: I1122 10:57:29.770153 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/3260200f-bc21-4521-9a62-2f67ab26f0df-public-tls-certs\") pod \"neutron-5f7c4dcf85-jl8kd\" (UID: \"3260200f-bc21-4521-9a62-2f67ab26f0df\") " pod="openstack/neutron-5f7c4dcf85-jl8kd" Nov 22 10:57:29 crc kubenswrapper[4926]: I1122 10:57:29.770239 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3260200f-bc21-4521-9a62-2f67ab26f0df-combined-ca-bundle\") pod \"neutron-5f7c4dcf85-jl8kd\" (UID: \"3260200f-bc21-4521-9a62-2f67ab26f0df\") " pod="openstack/neutron-5f7c4dcf85-jl8kd" Nov 22 10:57:29 crc kubenswrapper[4926]: I1122 10:57:29.770382 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/3260200f-bc21-4521-9a62-2f67ab26f0df-ovndb-tls-certs\") pod \"neutron-5f7c4dcf85-jl8kd\" (UID: \"3260200f-bc21-4521-9a62-2f67ab26f0df\") " pod="openstack/neutron-5f7c4dcf85-jl8kd" Nov 22 10:57:29 crc kubenswrapper[4926]: I1122 10:57:29.770435 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/3260200f-bc21-4521-9a62-2f67ab26f0df-httpd-config\") pod \"neutron-5f7c4dcf85-jl8kd\" (UID: \"3260200f-bc21-4521-9a62-2f67ab26f0df\") " pod="openstack/neutron-5f7c4dcf85-jl8kd" Nov 22 10:57:29 crc kubenswrapper[4926]: I1122 10:57:29.770490 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/3260200f-bc21-4521-9a62-2f67ab26f0df-config\") pod \"neutron-5f7c4dcf85-jl8kd\" (UID: \"3260200f-bc21-4521-9a62-2f67ab26f0df\") " pod="openstack/neutron-5f7c4dcf85-jl8kd" Nov 22 10:57:29 crc kubenswrapper[4926]: I1122 10:57:29.770516 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/3260200f-bc21-4521-9a62-2f67ab26f0df-internal-tls-certs\") pod \"neutron-5f7c4dcf85-jl8kd\" (UID: \"3260200f-bc21-4521-9a62-2f67ab26f0df\") " pod="openstack/neutron-5f7c4dcf85-jl8kd" Nov 22 10:57:29 crc kubenswrapper[4926]: I1122 10:57:29.777014 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/3260200f-bc21-4521-9a62-2f67ab26f0df-public-tls-certs\") pod \"neutron-5f7c4dcf85-jl8kd\" (UID: \"3260200f-bc21-4521-9a62-2f67ab26f0df\") " pod="openstack/neutron-5f7c4dcf85-jl8kd" Nov 22 10:57:29 crc kubenswrapper[4926]: I1122 10:57:29.777405 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/3260200f-bc21-4521-9a62-2f67ab26f0df-httpd-config\") pod \"neutron-5f7c4dcf85-jl8kd\" (UID: \"3260200f-bc21-4521-9a62-2f67ab26f0df\") " pod="openstack/neutron-5f7c4dcf85-jl8kd" Nov 22 10:57:29 crc kubenswrapper[4926]: I1122 10:57:29.778102 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/3260200f-bc21-4521-9a62-2f67ab26f0df-internal-tls-certs\") pod \"neutron-5f7c4dcf85-jl8kd\" (UID: 
\"3260200f-bc21-4521-9a62-2f67ab26f0df\") " pod="openstack/neutron-5f7c4dcf85-jl8kd" Nov 22 10:57:29 crc kubenswrapper[4926]: I1122 10:57:29.778954 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3260200f-bc21-4521-9a62-2f67ab26f0df-combined-ca-bundle\") pod \"neutron-5f7c4dcf85-jl8kd\" (UID: \"3260200f-bc21-4521-9a62-2f67ab26f0df\") " pod="openstack/neutron-5f7c4dcf85-jl8kd" Nov 22 10:57:29 crc kubenswrapper[4926]: I1122 10:57:29.780579 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/3260200f-bc21-4521-9a62-2f67ab26f0df-ovndb-tls-certs\") pod \"neutron-5f7c4dcf85-jl8kd\" (UID: \"3260200f-bc21-4521-9a62-2f67ab26f0df\") " pod="openstack/neutron-5f7c4dcf85-jl8kd" Nov 22 10:57:29 crc kubenswrapper[4926]: I1122 10:57:29.781444 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/3260200f-bc21-4521-9a62-2f67ab26f0df-config\") pod \"neutron-5f7c4dcf85-jl8kd\" (UID: \"3260200f-bc21-4521-9a62-2f67ab26f0df\") " pod="openstack/neutron-5f7c4dcf85-jl8kd" Nov 22 10:57:29 crc kubenswrapper[4926]: I1122 10:57:29.786739 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hzqpg\" (UniqueName: \"kubernetes.io/projected/3260200f-bc21-4521-9a62-2f67ab26f0df-kube-api-access-hzqpg\") pod \"neutron-5f7c4dcf85-jl8kd\" (UID: \"3260200f-bc21-4521-9a62-2f67ab26f0df\") " pod="openstack/neutron-5f7c4dcf85-jl8kd" Nov 22 10:57:29 crc kubenswrapper[4926]: I1122 10:57:29.802039 4926 generic.go:334] "Generic (PLEG): container finished" podID="a2f49243-dd53-46b6-9e36-bde6fe8d4e1d" containerID="784732c3115f04bf2e95422ed991d3dcbf5a07b08b16d274acd9c4e742fe8da4" exitCode=0 Nov 22 10:57:29 crc kubenswrapper[4926]: I1122 10:57:29.802101 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5ccc5c4795-nkxbv" event={"ID":"a2f49243-dd53-46b6-9e36-bde6fe8d4e1d","Type":"ContainerDied","Data":"784732c3115f04bf2e95422ed991d3dcbf5a07b08b16d274acd9c4e742fe8da4"} Nov 22 10:57:29 crc kubenswrapper[4926]: I1122 10:57:29.811125 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-65b67ff7d-d2fkp" event={"ID":"c5fcfb96-741e-467c-971f-762618aa54d5","Type":"ContainerStarted","Data":"8a34e44e2fb522335e12044012b732fd8e98177f9a0a0e110b3813821c9e22f4"} Nov 22 10:57:29 crc kubenswrapper[4926]: I1122 10:57:29.811185 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-65b67ff7d-d2fkp" event={"ID":"c5fcfb96-741e-467c-971f-762618aa54d5","Type":"ContainerStarted","Data":"94e6d4048f80c62b0b250da4b62b0d79148ad7e1b056917ee525639cb060530f"} Nov 22 10:57:29 crc kubenswrapper[4926]: I1122 10:57:29.812079 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/placement-65b67ff7d-d2fkp" Nov 22 10:57:29 crc kubenswrapper[4926]: I1122 10:57:29.812109 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/placement-65b67ff7d-d2fkp" Nov 22 10:57:29 crc kubenswrapper[4926]: I1122 10:57:29.820308 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-64c985686b-kqzqh" event={"ID":"dc8c34a7-9a8c-4ae9-afb4-eebee493f602","Type":"ContainerStarted","Data":"9f6e38673b72f96eacdc348f5ec4654dfb34a31ecfafba67822abe86d2cad186"} Nov 22 10:57:29 crc kubenswrapper[4926]: I1122 10:57:29.820347 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/neutron-64c985686b-kqzqh" event={"ID":"dc8c34a7-9a8c-4ae9-afb4-eebee493f602","Type":"ContainerStarted","Data":"b610cf4d2c20cc88a9b0d4c57fadd7eb904155b32a0fbdf1fb2004005d1ceb7b"} Nov 22 10:57:29 crc kubenswrapper[4926]: I1122 10:57:29.820360 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/keystone-68769dd845-84s2z" Nov 22 10:57:29 crc kubenswrapper[4926]: I1122 10:57:29.855867 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/placement-65b67ff7d-d2fkp" podStartSLOduration=2.8558415459999997 podStartE2EDuration="2.855841546s" podCreationTimestamp="2025-11-22 10:57:27 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 10:57:29.844183823 +0000 UTC m=+1070.145789110" watchObservedRunningTime="2025-11-22 10:57:29.855841546 +0000 UTC m=+1070.157446833" Nov 22 10:57:29 crc kubenswrapper[4926]: I1122 10:57:29.983457 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-5f7c4dcf85-jl8kd" Nov 22 10:57:30 crc kubenswrapper[4926]: I1122 10:57:30.628325 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-5f7c4dcf85-jl8kd"] Nov 22 10:57:30 crc kubenswrapper[4926]: I1122 10:57:30.833540 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-5f7c4dcf85-jl8kd" event={"ID":"3260200f-bc21-4521-9a62-2f67ab26f0df","Type":"ContainerStarted","Data":"60a33a64ae979aa3414e43c87c87de14daeb300901a17243e4ecdf5f049b5d54"} Nov 22 10:57:30 crc kubenswrapper[4926]: I1122 10:57:30.839313 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5ccc5c4795-nkxbv" event={"ID":"a2f49243-dd53-46b6-9e36-bde6fe8d4e1d","Type":"ContainerStarted","Data":"bf219715a60455239c8d71aaac3e245a9c448af097cd3b05fa1094f4f2fc2fab"} Nov 22 10:57:30 crc kubenswrapper[4926]: I1122 10:57:30.842015 4926 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 22 10:57:30 crc kubenswrapper[4926]: I1122 10:57:30.842039 4926 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 22 10:57:30 crc kubenswrapper[4926]: I1122 10:57:30.842082 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-64c985686b-kqzqh" event={"ID":"dc8c34a7-9a8c-4ae9-afb4-eebee493f602","Type":"ContainerStarted","Data":"ab491699e4c12708fd67dba8ac5c6be86603d3691c9798faec0c28312c58cca5"} Nov 22 10:57:30 crc kubenswrapper[4926]: I1122 10:57:30.842208 4926 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 22 10:57:30 crc kubenswrapper[4926]: I1122 10:57:30.842223 4926 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 22 10:57:31 crc kubenswrapper[4926]: I1122 10:57:31.356269 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0" Nov 22 10:57:31 crc kubenswrapper[4926]: I1122 10:57:31.358519 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0" Nov 22 10:57:31 crc kubenswrapper[4926]: I1122 10:57:31.544047 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0" Nov 22 10:57:31 crc kubenswrapper[4926]: I1122 10:57:31.545162 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0" Nov 22 10:57:32 crc 
kubenswrapper[4926]: I1122 10:57:32.872202 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-5f7c4dcf85-jl8kd" event={"ID":"3260200f-bc21-4521-9a62-2f67ab26f0df","Type":"ContainerStarted","Data":"4c7f7e40364c3b6c4f2d34b01c10e84d14867579e16621ba5282205cd2ba0084"} Nov 22 10:57:32 crc kubenswrapper[4926]: I1122 10:57:32.873214 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-5ccc5c4795-nkxbv" Nov 22 10:57:32 crc kubenswrapper[4926]: I1122 10:57:32.906480 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-5ccc5c4795-nkxbv" podStartSLOduration=5.906461727 podStartE2EDuration="5.906461727s" podCreationTimestamp="2025-11-22 10:57:27 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 10:57:32.901871825 +0000 UTC m=+1073.203477112" watchObservedRunningTime="2025-11-22 10:57:32.906461727 +0000 UTC m=+1073.208067014" Nov 22 10:57:32 crc kubenswrapper[4926]: I1122 10:57:32.930915 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-64c985686b-kqzqh" podStartSLOduration=5.930870274 podStartE2EDuration="5.930870274s" podCreationTimestamp="2025-11-22 10:57:27 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 10:57:32.927979342 +0000 UTC m=+1073.229584629" watchObservedRunningTime="2025-11-22 10:57:32.930870274 +0000 UTC m=+1073.232475561" Nov 22 10:57:33 crc kubenswrapper[4926]: I1122 10:57:33.885665 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-5f7c4dcf85-jl8kd" event={"ID":"3260200f-bc21-4521-9a62-2f67ab26f0df","Type":"ContainerStarted","Data":"215622f09dac6ee52df2ec47d5ccdf27e3e894a5b0e6644e09800fc9e30c228a"} Nov 22 10:57:33 crc kubenswrapper[4926]: I1122 10:57:33.886391 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/neutron-5f7c4dcf85-jl8kd" Nov 22 10:57:33 crc kubenswrapper[4926]: I1122 10:57:33.909821 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-5f7c4dcf85-jl8kd" podStartSLOduration=4.909801784 podStartE2EDuration="4.909801784s" podCreationTimestamp="2025-11-22 10:57:29 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 10:57:33.909048492 +0000 UTC m=+1074.210653799" watchObservedRunningTime="2025-11-22 10:57:33.909801784 +0000 UTC m=+1074.211407071" Nov 22 10:57:37 crc kubenswrapper[4926]: I1122 10:57:37.712296 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-5ccc5c4795-nkxbv" Nov 22 10:57:37 crc kubenswrapper[4926]: I1122 10:57:37.802414 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-57c957c4ff-9ff7g"] Nov 22 10:57:37 crc kubenswrapper[4926]: I1122 10:57:37.802688 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-57c957c4ff-9ff7g" podUID="b8d7394c-3470-41de-8f80-dad43dadff31" containerName="dnsmasq-dns" containerID="cri-o://ef7fb645788760bfd3a10062b152cbea3873c21b8f43f1d9df41e595f2b29d3c" gracePeriod=10 Nov 22 10:57:37 crc kubenswrapper[4926]: I1122 10:57:37.949948 4926 generic.go:334] "Generic (PLEG): container finished" podID="b8d7394c-3470-41de-8f80-dad43dadff31" 
containerID="ef7fb645788760bfd3a10062b152cbea3873c21b8f43f1d9df41e595f2b29d3c" exitCode=0 Nov 22 10:57:37 crc kubenswrapper[4926]: I1122 10:57:37.949993 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57c957c4ff-9ff7g" event={"ID":"b8d7394c-3470-41de-8f80-dad43dadff31","Type":"ContainerDied","Data":"ef7fb645788760bfd3a10062b152cbea3873c21b8f43f1d9df41e595f2b29d3c"} Nov 22 10:57:38 crc kubenswrapper[4926]: I1122 10:57:38.075578 4926 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/horizon-67f69cf99d-5jsdr" podUID="0566b619-da0e-49ff-b282-3d2bb8ae4fe6" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.149:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.149:8443: connect: connection refused" Nov 22 10:57:38 crc kubenswrapper[4926]: I1122 10:57:38.221379 4926 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/horizon-86dd5d599b-jndzq" podUID="a7c08c13-5c9c-42ac-8fdc-e651c26d97fc" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.150:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.150:8443: connect: connection refused" Nov 22 10:57:38 crc kubenswrapper[4926]: I1122 10:57:38.967159 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57c957c4ff-9ff7g" event={"ID":"b8d7394c-3470-41de-8f80-dad43dadff31","Type":"ContainerDied","Data":"1a13b1b8a2facc9e556989bdeb008d51b515b063691966a8681a0bc1510979b9"} Nov 22 10:57:38 crc kubenswrapper[4926]: I1122 10:57:38.967672 4926 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="1a13b1b8a2facc9e556989bdeb008d51b515b063691966a8681a0bc1510979b9" Nov 22 10:57:39 crc kubenswrapper[4926]: I1122 10:57:39.103602 4926 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-57c957c4ff-9ff7g" Nov 22 10:57:39 crc kubenswrapper[4926]: I1122 10:57:39.256906 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/b8d7394c-3470-41de-8f80-dad43dadff31-ovsdbserver-nb\") pod \"b8d7394c-3470-41de-8f80-dad43dadff31\" (UID: \"b8d7394c-3470-41de-8f80-dad43dadff31\") " Nov 22 10:57:39 crc kubenswrapper[4926]: I1122 10:57:39.256978 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gsnkn\" (UniqueName: \"kubernetes.io/projected/b8d7394c-3470-41de-8f80-dad43dadff31-kube-api-access-gsnkn\") pod \"b8d7394c-3470-41de-8f80-dad43dadff31\" (UID: \"b8d7394c-3470-41de-8f80-dad43dadff31\") " Nov 22 10:57:39 crc kubenswrapper[4926]: I1122 10:57:39.257020 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/b8d7394c-3470-41de-8f80-dad43dadff31-dns-swift-storage-0\") pod \"b8d7394c-3470-41de-8f80-dad43dadff31\" (UID: \"b8d7394c-3470-41de-8f80-dad43dadff31\") " Nov 22 10:57:39 crc kubenswrapper[4926]: I1122 10:57:39.257134 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b8d7394c-3470-41de-8f80-dad43dadff31-config\") pod \"b8d7394c-3470-41de-8f80-dad43dadff31\" (UID: \"b8d7394c-3470-41de-8f80-dad43dadff31\") " Nov 22 10:57:39 crc kubenswrapper[4926]: I1122 10:57:39.257174 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/b8d7394c-3470-41de-8f80-dad43dadff31-ovsdbserver-sb\") pod \"b8d7394c-3470-41de-8f80-dad43dadff31\" (UID: \"b8d7394c-3470-41de-8f80-dad43dadff31\") " Nov 22 10:57:39 crc kubenswrapper[4926]: I1122 10:57:39.257703 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/b8d7394c-3470-41de-8f80-dad43dadff31-dns-svc\") pod \"b8d7394c-3470-41de-8f80-dad43dadff31\" (UID: \"b8d7394c-3470-41de-8f80-dad43dadff31\") " Nov 22 10:57:39 crc kubenswrapper[4926]: I1122 10:57:39.269030 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b8d7394c-3470-41de-8f80-dad43dadff31-kube-api-access-gsnkn" (OuterVolumeSpecName: "kube-api-access-gsnkn") pod "b8d7394c-3470-41de-8f80-dad43dadff31" (UID: "b8d7394c-3470-41de-8f80-dad43dadff31"). InnerVolumeSpecName "kube-api-access-gsnkn". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:57:39 crc kubenswrapper[4926]: I1122 10:57:39.317374 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b8d7394c-3470-41de-8f80-dad43dadff31-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "b8d7394c-3470-41de-8f80-dad43dadff31" (UID: "b8d7394c-3470-41de-8f80-dad43dadff31"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:57:39 crc kubenswrapper[4926]: I1122 10:57:39.321897 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b8d7394c-3470-41de-8f80-dad43dadff31-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "b8d7394c-3470-41de-8f80-dad43dadff31" (UID: "b8d7394c-3470-41de-8f80-dad43dadff31"). InnerVolumeSpecName "ovsdbserver-nb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:57:39 crc kubenswrapper[4926]: I1122 10:57:39.332536 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b8d7394c-3470-41de-8f80-dad43dadff31-config" (OuterVolumeSpecName: "config") pod "b8d7394c-3470-41de-8f80-dad43dadff31" (UID: "b8d7394c-3470-41de-8f80-dad43dadff31"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:57:39 crc kubenswrapper[4926]: I1122 10:57:39.333786 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b8d7394c-3470-41de-8f80-dad43dadff31-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "b8d7394c-3470-41de-8f80-dad43dadff31" (UID: "b8d7394c-3470-41de-8f80-dad43dadff31"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:57:39 crc kubenswrapper[4926]: I1122 10:57:39.349799 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b8d7394c-3470-41de-8f80-dad43dadff31-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "b8d7394c-3470-41de-8f80-dad43dadff31" (UID: "b8d7394c-3470-41de-8f80-dad43dadff31"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:57:39 crc kubenswrapper[4926]: I1122 10:57:39.360581 4926 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/b8d7394c-3470-41de-8f80-dad43dadff31-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 22 10:57:39 crc kubenswrapper[4926]: I1122 10:57:39.360624 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gsnkn\" (UniqueName: \"kubernetes.io/projected/b8d7394c-3470-41de-8f80-dad43dadff31-kube-api-access-gsnkn\") on node \"crc\" DevicePath \"\"" Nov 22 10:57:39 crc kubenswrapper[4926]: I1122 10:57:39.360640 4926 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/b8d7394c-3470-41de-8f80-dad43dadff31-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Nov 22 10:57:39 crc kubenswrapper[4926]: I1122 10:57:39.360652 4926 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b8d7394c-3470-41de-8f80-dad43dadff31-config\") on node \"crc\" DevicePath \"\"" Nov 22 10:57:39 crc kubenswrapper[4926]: I1122 10:57:39.360664 4926 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/b8d7394c-3470-41de-8f80-dad43dadff31-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 22 10:57:39 crc kubenswrapper[4926]: I1122 10:57:39.360675 4926 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/b8d7394c-3470-41de-8f80-dad43dadff31-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 22 10:57:39 crc kubenswrapper[4926]: I1122 10:57:39.982028 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-fh2gk" event={"ID":"63e63df0-e7ff-46a2-9b1d-60be115851ce","Type":"ContainerStarted","Data":"41e0bea0065c6eacfb4160378ac61f9c77f893c6bc4fcb6785fb1a768f7faa4d"} Nov 22 10:57:39 crc kubenswrapper[4926]: I1122 10:57:39.986871 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-bw5rr" 
event={"ID":"05bbddb1-c370-4805-9f91-373535d67f52","Type":"ContainerStarted","Data":"0356bf16a5cb63a8ceac47397e77e5a730a5a9ae12746b24f7d325954c0874f7"} Nov 22 10:57:39 crc kubenswrapper[4926]: I1122 10:57:39.995512 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-57c957c4ff-9ff7g" Nov 22 10:57:39 crc kubenswrapper[4926]: I1122 10:57:39.996054 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="9ebf564c-b5b8-40b7-8899-fc953a485d4d" containerName="ceilometer-central-agent" containerID="cri-o://48fa46793c7476a56cef30695a0e515257c60a6f00469b5a1267c6d887037ff8" gracePeriod=30 Nov 22 10:57:39 crc kubenswrapper[4926]: I1122 10:57:39.996232 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="9ebf564c-b5b8-40b7-8899-fc953a485d4d" containerName="proxy-httpd" containerID="cri-o://bcf6c4cd3858ca37116ed5839ef9c94c54ef8896aae3fc70505a5db338fd869f" gracePeriod=30 Nov 22 10:57:39 crc kubenswrapper[4926]: I1122 10:57:39.996279 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="9ebf564c-b5b8-40b7-8899-fc953a485d4d" containerName="sg-core" containerID="cri-o://067015ba91f865ef3fda97dc58b90026ce16d3cee99ee22920cb10888c02a4fc" gracePeriod=30 Nov 22 10:57:39 crc kubenswrapper[4926]: I1122 10:57:39.996316 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="9ebf564c-b5b8-40b7-8899-fc953a485d4d" containerName="ceilometer-notification-agent" containerID="cri-o://a9296219406bba108a07922e83b06df459b0b44a88740e091803705d572be390" gracePeriod=30 Nov 22 10:57:39 crc kubenswrapper[4926]: I1122 10:57:39.996598 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"9ebf564c-b5b8-40b7-8899-fc953a485d4d","Type":"ContainerStarted","Data":"bcf6c4cd3858ca37116ed5839ef9c94c54ef8896aae3fc70505a5db338fd869f"} Nov 22 10:57:39 crc kubenswrapper[4926]: I1122 10:57:39.996631 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Nov 22 10:57:40 crc kubenswrapper[4926]: I1122 10:57:40.015207 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-db-sync-fh2gk" podStartSLOduration=3.416415068 podStartE2EDuration="52.015186353s" podCreationTimestamp="2025-11-22 10:56:48 +0000 UTC" firstStartedPulling="2025-11-22 10:56:50.284360683 +0000 UTC m=+1030.585965960" lastFinishedPulling="2025-11-22 10:57:38.883131958 +0000 UTC m=+1079.184737245" observedRunningTime="2025-11-22 10:57:40.010826418 +0000 UTC m=+1080.312431705" watchObservedRunningTime="2025-11-22 10:57:40.015186353 +0000 UTC m=+1080.316791640" Nov 22 10:57:40 crc kubenswrapper[4926]: I1122 10:57:40.040641 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-db-sync-bw5rr" podStartSLOduration=3.570383242 podStartE2EDuration="52.04061695s" podCreationTimestamp="2025-11-22 10:56:48 +0000 UTC" firstStartedPulling="2025-11-22 10:56:50.41778819 +0000 UTC m=+1030.719393477" lastFinishedPulling="2025-11-22 10:57:38.888021898 +0000 UTC m=+1079.189627185" observedRunningTime="2025-11-22 10:57:40.035560395 +0000 UTC m=+1080.337165702" watchObservedRunningTime="2025-11-22 10:57:40.04061695 +0000 UTC m=+1080.342222247" Nov 22 10:57:40 crc kubenswrapper[4926]: I1122 10:57:40.064959 4926 pod_startup_latency_tracker.go:104] "Observed 
pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=3.487257057 podStartE2EDuration="52.064938445s" podCreationTimestamp="2025-11-22 10:56:48 +0000 UTC" firstStartedPulling="2025-11-22 10:56:50.307043535 +0000 UTC m=+1030.608648822" lastFinishedPulling="2025-11-22 10:57:38.884724923 +0000 UTC m=+1079.186330210" observedRunningTime="2025-11-22 10:57:40.063058641 +0000 UTC m=+1080.364663928" watchObservedRunningTime="2025-11-22 10:57:40.064938445 +0000 UTC m=+1080.366543732" Nov 22 10:57:40 crc kubenswrapper[4926]: I1122 10:57:40.087291 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-57c957c4ff-9ff7g"] Nov 22 10:57:40 crc kubenswrapper[4926]: I1122 10:57:40.094994 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-57c957c4ff-9ff7g"] Nov 22 10:57:40 crc kubenswrapper[4926]: I1122 10:57:40.599234 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b8d7394c-3470-41de-8f80-dad43dadff31" path="/var/lib/kubelet/pods/b8d7394c-3470-41de-8f80-dad43dadff31/volumes" Nov 22 10:57:41 crc kubenswrapper[4926]: I1122 10:57:41.010959 4926 generic.go:334] "Generic (PLEG): container finished" podID="9ebf564c-b5b8-40b7-8899-fc953a485d4d" containerID="bcf6c4cd3858ca37116ed5839ef9c94c54ef8896aae3fc70505a5db338fd869f" exitCode=0 Nov 22 10:57:41 crc kubenswrapper[4926]: I1122 10:57:41.011003 4926 generic.go:334] "Generic (PLEG): container finished" podID="9ebf564c-b5b8-40b7-8899-fc953a485d4d" containerID="067015ba91f865ef3fda97dc58b90026ce16d3cee99ee22920cb10888c02a4fc" exitCode=2 Nov 22 10:57:41 crc kubenswrapper[4926]: I1122 10:57:41.011013 4926 generic.go:334] "Generic (PLEG): container finished" podID="9ebf564c-b5b8-40b7-8899-fc953a485d4d" containerID="48fa46793c7476a56cef30695a0e515257c60a6f00469b5a1267c6d887037ff8" exitCode=0 Nov 22 10:57:41 crc kubenswrapper[4926]: I1122 10:57:41.011357 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"9ebf564c-b5b8-40b7-8899-fc953a485d4d","Type":"ContainerDied","Data":"bcf6c4cd3858ca37116ed5839ef9c94c54ef8896aae3fc70505a5db338fd869f"} Nov 22 10:57:41 crc kubenswrapper[4926]: I1122 10:57:41.011460 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"9ebf564c-b5b8-40b7-8899-fc953a485d4d","Type":"ContainerDied","Data":"067015ba91f865ef3fda97dc58b90026ce16d3cee99ee22920cb10888c02a4fc"} Nov 22 10:57:41 crc kubenswrapper[4926]: I1122 10:57:41.011490 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"9ebf564c-b5b8-40b7-8899-fc953a485d4d","Type":"ContainerDied","Data":"48fa46793c7476a56cef30695a0e515257c60a6f00469b5a1267c6d887037ff8"} Nov 22 10:57:42 crc kubenswrapper[4926]: I1122 10:57:42.022690 4926 generic.go:334] "Generic (PLEG): container finished" podID="05bbddb1-c370-4805-9f91-373535d67f52" containerID="0356bf16a5cb63a8ceac47397e77e5a730a5a9ae12746b24f7d325954c0874f7" exitCode=0 Nov 22 10:57:42 crc kubenswrapper[4926]: I1122 10:57:42.022872 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-bw5rr" event={"ID":"05bbddb1-c370-4805-9f91-373535d67f52","Type":"ContainerDied","Data":"0356bf16a5cb63a8ceac47397e77e5a730a5a9ae12746b24f7d325954c0874f7"} Nov 22 10:57:42 crc kubenswrapper[4926]: I1122 10:57:42.852347 4926 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 22 10:57:42 crc kubenswrapper[4926]: I1122 10:57:42.937665 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/9ebf564c-b5b8-40b7-8899-fc953a485d4d-log-httpd\") pod \"9ebf564c-b5b8-40b7-8899-fc953a485d4d\" (UID: \"9ebf564c-b5b8-40b7-8899-fc953a485d4d\") " Nov 22 10:57:42 crc kubenswrapper[4926]: I1122 10:57:42.937836 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9ebf564c-b5b8-40b7-8899-fc953a485d4d-combined-ca-bundle\") pod \"9ebf564c-b5b8-40b7-8899-fc953a485d4d\" (UID: \"9ebf564c-b5b8-40b7-8899-fc953a485d4d\") " Nov 22 10:57:42 crc kubenswrapper[4926]: I1122 10:57:42.937912 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/9ebf564c-b5b8-40b7-8899-fc953a485d4d-run-httpd\") pod \"9ebf564c-b5b8-40b7-8899-fc953a485d4d\" (UID: \"9ebf564c-b5b8-40b7-8899-fc953a485d4d\") " Nov 22 10:57:42 crc kubenswrapper[4926]: I1122 10:57:42.937947 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/9ebf564c-b5b8-40b7-8899-fc953a485d4d-sg-core-conf-yaml\") pod \"9ebf564c-b5b8-40b7-8899-fc953a485d4d\" (UID: \"9ebf564c-b5b8-40b7-8899-fc953a485d4d\") " Nov 22 10:57:42 crc kubenswrapper[4926]: I1122 10:57:42.938060 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9ebf564c-b5b8-40b7-8899-fc953a485d4d-config-data\") pod \"9ebf564c-b5b8-40b7-8899-fc953a485d4d\" (UID: \"9ebf564c-b5b8-40b7-8899-fc953a485d4d\") " Nov 22 10:57:42 crc kubenswrapper[4926]: I1122 10:57:42.938109 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6xm5s\" (UniqueName: \"kubernetes.io/projected/9ebf564c-b5b8-40b7-8899-fc953a485d4d-kube-api-access-6xm5s\") pod \"9ebf564c-b5b8-40b7-8899-fc953a485d4d\" (UID: \"9ebf564c-b5b8-40b7-8899-fc953a485d4d\") " Nov 22 10:57:42 crc kubenswrapper[4926]: I1122 10:57:42.938144 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9ebf564c-b5b8-40b7-8899-fc953a485d4d-scripts\") pod \"9ebf564c-b5b8-40b7-8899-fc953a485d4d\" (UID: \"9ebf564c-b5b8-40b7-8899-fc953a485d4d\") " Nov 22 10:57:42 crc kubenswrapper[4926]: I1122 10:57:42.938185 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9ebf564c-b5b8-40b7-8899-fc953a485d4d-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "9ebf564c-b5b8-40b7-8899-fc953a485d4d" (UID: "9ebf564c-b5b8-40b7-8899-fc953a485d4d"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 10:57:42 crc kubenswrapper[4926]: I1122 10:57:42.938438 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9ebf564c-b5b8-40b7-8899-fc953a485d4d-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "9ebf564c-b5b8-40b7-8899-fc953a485d4d" (UID: "9ebf564c-b5b8-40b7-8899-fc953a485d4d"). InnerVolumeSpecName "run-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 10:57:42 crc kubenswrapper[4926]: I1122 10:57:42.938559 4926 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/9ebf564c-b5b8-40b7-8899-fc953a485d4d-run-httpd\") on node \"crc\" DevicePath \"\"" Nov 22 10:57:42 crc kubenswrapper[4926]: I1122 10:57:42.938571 4926 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/9ebf564c-b5b8-40b7-8899-fc953a485d4d-log-httpd\") on node \"crc\" DevicePath \"\"" Nov 22 10:57:42 crc kubenswrapper[4926]: I1122 10:57:42.943833 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9ebf564c-b5b8-40b7-8899-fc953a485d4d-scripts" (OuterVolumeSpecName: "scripts") pod "9ebf564c-b5b8-40b7-8899-fc953a485d4d" (UID: "9ebf564c-b5b8-40b7-8899-fc953a485d4d"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:57:42 crc kubenswrapper[4926]: I1122 10:57:42.946126 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9ebf564c-b5b8-40b7-8899-fc953a485d4d-kube-api-access-6xm5s" (OuterVolumeSpecName: "kube-api-access-6xm5s") pod "9ebf564c-b5b8-40b7-8899-fc953a485d4d" (UID: "9ebf564c-b5b8-40b7-8899-fc953a485d4d"). InnerVolumeSpecName "kube-api-access-6xm5s". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:57:42 crc kubenswrapper[4926]: I1122 10:57:42.970174 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9ebf564c-b5b8-40b7-8899-fc953a485d4d-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "9ebf564c-b5b8-40b7-8899-fc953a485d4d" (UID: "9ebf564c-b5b8-40b7-8899-fc953a485d4d"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:57:43 crc kubenswrapper[4926]: I1122 10:57:43.006742 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9ebf564c-b5b8-40b7-8899-fc953a485d4d-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "9ebf564c-b5b8-40b7-8899-fc953a485d4d" (UID: "9ebf564c-b5b8-40b7-8899-fc953a485d4d"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:57:43 crc kubenswrapper[4926]: I1122 10:57:43.031830 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9ebf564c-b5b8-40b7-8899-fc953a485d4d-config-data" (OuterVolumeSpecName: "config-data") pod "9ebf564c-b5b8-40b7-8899-fc953a485d4d" (UID: "9ebf564c-b5b8-40b7-8899-fc953a485d4d"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:57:43 crc kubenswrapper[4926]: I1122 10:57:43.034698 4926 generic.go:334] "Generic (PLEG): container finished" podID="9ebf564c-b5b8-40b7-8899-fc953a485d4d" containerID="a9296219406bba108a07922e83b06df459b0b44a88740e091803705d572be390" exitCode=0 Nov 22 10:57:43 crc kubenswrapper[4926]: I1122 10:57:43.034862 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"9ebf564c-b5b8-40b7-8899-fc953a485d4d","Type":"ContainerDied","Data":"a9296219406bba108a07922e83b06df459b0b44a88740e091803705d572be390"} Nov 22 10:57:43 crc kubenswrapper[4926]: I1122 10:57:43.034910 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"9ebf564c-b5b8-40b7-8899-fc953a485d4d","Type":"ContainerDied","Data":"0ac43e049d01b0fd78612539f468ef55b38835efd3d53892f584fc65ab0566c4"} Nov 22 10:57:43 crc kubenswrapper[4926]: I1122 10:57:43.034928 4926 scope.go:117] "RemoveContainer" containerID="bcf6c4cd3858ca37116ed5839ef9c94c54ef8896aae3fc70505a5db338fd869f" Nov 22 10:57:43 crc kubenswrapper[4926]: I1122 10:57:43.035040 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 22 10:57:43 crc kubenswrapper[4926]: I1122 10:57:43.040860 4926 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/9ebf564c-b5b8-40b7-8899-fc953a485d4d-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Nov 22 10:57:43 crc kubenswrapper[4926]: I1122 10:57:43.040913 4926 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9ebf564c-b5b8-40b7-8899-fc953a485d4d-config-data\") on node \"crc\" DevicePath \"\"" Nov 22 10:57:43 crc kubenswrapper[4926]: I1122 10:57:43.040925 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6xm5s\" (UniqueName: \"kubernetes.io/projected/9ebf564c-b5b8-40b7-8899-fc953a485d4d-kube-api-access-6xm5s\") on node \"crc\" DevicePath \"\"" Nov 22 10:57:43 crc kubenswrapper[4926]: I1122 10:57:43.040935 4926 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9ebf564c-b5b8-40b7-8899-fc953a485d4d-scripts\") on node \"crc\" DevicePath \"\"" Nov 22 10:57:43 crc kubenswrapper[4926]: I1122 10:57:43.040944 4926 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9ebf564c-b5b8-40b7-8899-fc953a485d4d-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 22 10:57:43 crc kubenswrapper[4926]: I1122 10:57:43.073730 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 22 10:57:43 crc kubenswrapper[4926]: I1122 10:57:43.079380 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Nov 22 10:57:43 crc kubenswrapper[4926]: I1122 10:57:43.081599 4926 scope.go:117] "RemoveContainer" containerID="067015ba91f865ef3fda97dc58b90026ce16d3cee99ee22920cb10888c02a4fc" Nov 22 10:57:43 crc kubenswrapper[4926]: I1122 10:57:43.101874 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 22 10:57:43 crc kubenswrapper[4926]: E1122 10:57:43.102221 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9ebf564c-b5b8-40b7-8899-fc953a485d4d" containerName="ceilometer-notification-agent" Nov 22 10:57:43 crc kubenswrapper[4926]: I1122 10:57:43.102237 4926 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="9ebf564c-b5b8-40b7-8899-fc953a485d4d" containerName="ceilometer-notification-agent" Nov 22 10:57:43 crc kubenswrapper[4926]: E1122 10:57:43.102257 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9ebf564c-b5b8-40b7-8899-fc953a485d4d" containerName="proxy-httpd" Nov 22 10:57:43 crc kubenswrapper[4926]: I1122 10:57:43.102263 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="9ebf564c-b5b8-40b7-8899-fc953a485d4d" containerName="proxy-httpd" Nov 22 10:57:43 crc kubenswrapper[4926]: E1122 10:57:43.102281 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9ebf564c-b5b8-40b7-8899-fc953a485d4d" containerName="sg-core" Nov 22 10:57:43 crc kubenswrapper[4926]: I1122 10:57:43.102287 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="9ebf564c-b5b8-40b7-8899-fc953a485d4d" containerName="sg-core" Nov 22 10:57:43 crc kubenswrapper[4926]: E1122 10:57:43.102297 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b8d7394c-3470-41de-8f80-dad43dadff31" containerName="dnsmasq-dns" Nov 22 10:57:43 crc kubenswrapper[4926]: I1122 10:57:43.102303 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="b8d7394c-3470-41de-8f80-dad43dadff31" containerName="dnsmasq-dns" Nov 22 10:57:43 crc kubenswrapper[4926]: E1122 10:57:43.102311 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9ebf564c-b5b8-40b7-8899-fc953a485d4d" containerName="ceilometer-central-agent" Nov 22 10:57:43 crc kubenswrapper[4926]: I1122 10:57:43.102317 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="9ebf564c-b5b8-40b7-8899-fc953a485d4d" containerName="ceilometer-central-agent" Nov 22 10:57:43 crc kubenswrapper[4926]: E1122 10:57:43.102331 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b8d7394c-3470-41de-8f80-dad43dadff31" containerName="init" Nov 22 10:57:43 crc kubenswrapper[4926]: I1122 10:57:43.102336 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="b8d7394c-3470-41de-8f80-dad43dadff31" containerName="init" Nov 22 10:57:43 crc kubenswrapper[4926]: I1122 10:57:43.102511 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="9ebf564c-b5b8-40b7-8899-fc953a485d4d" containerName="sg-core" Nov 22 10:57:43 crc kubenswrapper[4926]: I1122 10:57:43.102525 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="b8d7394c-3470-41de-8f80-dad43dadff31" containerName="dnsmasq-dns" Nov 22 10:57:43 crc kubenswrapper[4926]: I1122 10:57:43.102536 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="9ebf564c-b5b8-40b7-8899-fc953a485d4d" containerName="proxy-httpd" Nov 22 10:57:43 crc kubenswrapper[4926]: I1122 10:57:43.102549 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="9ebf564c-b5b8-40b7-8899-fc953a485d4d" containerName="ceilometer-central-agent" Nov 22 10:57:43 crc kubenswrapper[4926]: I1122 10:57:43.102559 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="9ebf564c-b5b8-40b7-8899-fc953a485d4d" containerName="ceilometer-notification-agent" Nov 22 10:57:43 crc kubenswrapper[4926]: I1122 10:57:43.103997 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 22 10:57:43 crc kubenswrapper[4926]: I1122 10:57:43.108184 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 22 10:57:43 crc kubenswrapper[4926]: I1122 10:57:43.108285 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 22 10:57:43 crc kubenswrapper[4926]: I1122 10:57:43.120252 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 22 10:57:43 crc kubenswrapper[4926]: I1122 10:57:43.145584 4926 scope.go:117] "RemoveContainer" containerID="a9296219406bba108a07922e83b06df459b0b44a88740e091803705d572be390" Nov 22 10:57:43 crc kubenswrapper[4926]: I1122 10:57:43.183321 4926 scope.go:117] "RemoveContainer" containerID="48fa46793c7476a56cef30695a0e515257c60a6f00469b5a1267c6d887037ff8" Nov 22 10:57:43 crc kubenswrapper[4926]: I1122 10:57:43.237835 4926 scope.go:117] "RemoveContainer" containerID="bcf6c4cd3858ca37116ed5839ef9c94c54ef8896aae3fc70505a5db338fd869f" Nov 22 10:57:43 crc kubenswrapper[4926]: E1122 10:57:43.238366 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"bcf6c4cd3858ca37116ed5839ef9c94c54ef8896aae3fc70505a5db338fd869f\": container with ID starting with bcf6c4cd3858ca37116ed5839ef9c94c54ef8896aae3fc70505a5db338fd869f not found: ID does not exist" containerID="bcf6c4cd3858ca37116ed5839ef9c94c54ef8896aae3fc70505a5db338fd869f" Nov 22 10:57:43 crc kubenswrapper[4926]: I1122 10:57:43.238438 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bcf6c4cd3858ca37116ed5839ef9c94c54ef8896aae3fc70505a5db338fd869f"} err="failed to get container status \"bcf6c4cd3858ca37116ed5839ef9c94c54ef8896aae3fc70505a5db338fd869f\": rpc error: code = NotFound desc = could not find container \"bcf6c4cd3858ca37116ed5839ef9c94c54ef8896aae3fc70505a5db338fd869f\": container with ID starting with bcf6c4cd3858ca37116ed5839ef9c94c54ef8896aae3fc70505a5db338fd869f not found: ID does not exist" Nov 22 10:57:43 crc kubenswrapper[4926]: I1122 10:57:43.238474 4926 scope.go:117] "RemoveContainer" containerID="067015ba91f865ef3fda97dc58b90026ce16d3cee99ee22920cb10888c02a4fc" Nov 22 10:57:43 crc kubenswrapper[4926]: E1122 10:57:43.238916 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"067015ba91f865ef3fda97dc58b90026ce16d3cee99ee22920cb10888c02a4fc\": container with ID starting with 067015ba91f865ef3fda97dc58b90026ce16d3cee99ee22920cb10888c02a4fc not found: ID does not exist" containerID="067015ba91f865ef3fda97dc58b90026ce16d3cee99ee22920cb10888c02a4fc" Nov 22 10:57:43 crc kubenswrapper[4926]: I1122 10:57:43.238953 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"067015ba91f865ef3fda97dc58b90026ce16d3cee99ee22920cb10888c02a4fc"} err="failed to get container status \"067015ba91f865ef3fda97dc58b90026ce16d3cee99ee22920cb10888c02a4fc\": rpc error: code = NotFound desc = could not find container \"067015ba91f865ef3fda97dc58b90026ce16d3cee99ee22920cb10888c02a4fc\": container with ID starting with 067015ba91f865ef3fda97dc58b90026ce16d3cee99ee22920cb10888c02a4fc not found: ID does not exist" Nov 22 10:57:43 crc kubenswrapper[4926]: I1122 10:57:43.238980 4926 scope.go:117] "RemoveContainer" containerID="a9296219406bba108a07922e83b06df459b0b44a88740e091803705d572be390" Nov 22 
10:57:43 crc kubenswrapper[4926]: E1122 10:57:43.239305 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a9296219406bba108a07922e83b06df459b0b44a88740e091803705d572be390\": container with ID starting with a9296219406bba108a07922e83b06df459b0b44a88740e091803705d572be390 not found: ID does not exist" containerID="a9296219406bba108a07922e83b06df459b0b44a88740e091803705d572be390" Nov 22 10:57:43 crc kubenswrapper[4926]: I1122 10:57:43.239351 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a9296219406bba108a07922e83b06df459b0b44a88740e091803705d572be390"} err="failed to get container status \"a9296219406bba108a07922e83b06df459b0b44a88740e091803705d572be390\": rpc error: code = NotFound desc = could not find container \"a9296219406bba108a07922e83b06df459b0b44a88740e091803705d572be390\": container with ID starting with a9296219406bba108a07922e83b06df459b0b44a88740e091803705d572be390 not found: ID does not exist" Nov 22 10:57:43 crc kubenswrapper[4926]: I1122 10:57:43.239385 4926 scope.go:117] "RemoveContainer" containerID="48fa46793c7476a56cef30695a0e515257c60a6f00469b5a1267c6d887037ff8" Nov 22 10:57:43 crc kubenswrapper[4926]: E1122 10:57:43.239684 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"48fa46793c7476a56cef30695a0e515257c60a6f00469b5a1267c6d887037ff8\": container with ID starting with 48fa46793c7476a56cef30695a0e515257c60a6f00469b5a1267c6d887037ff8 not found: ID does not exist" containerID="48fa46793c7476a56cef30695a0e515257c60a6f00469b5a1267c6d887037ff8" Nov 22 10:57:43 crc kubenswrapper[4926]: I1122 10:57:43.239706 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"48fa46793c7476a56cef30695a0e515257c60a6f00469b5a1267c6d887037ff8"} err="failed to get container status \"48fa46793c7476a56cef30695a0e515257c60a6f00469b5a1267c6d887037ff8\": rpc error: code = NotFound desc = could not find container \"48fa46793c7476a56cef30695a0e515257c60a6f00469b5a1267c6d887037ff8\": container with ID starting with 48fa46793c7476a56cef30695a0e515257c60a6f00469b5a1267c6d887037ff8 not found: ID does not exist" Nov 22 10:57:43 crc kubenswrapper[4926]: I1122 10:57:43.243823 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-smk8w\" (UniqueName: \"kubernetes.io/projected/8d021220-ab5c-4d9c-9b7a-dd2248121353-kube-api-access-smk8w\") pod \"ceilometer-0\" (UID: \"8d021220-ab5c-4d9c-9b7a-dd2248121353\") " pod="openstack/ceilometer-0" Nov 22 10:57:43 crc kubenswrapper[4926]: I1122 10:57:43.243867 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/8d021220-ab5c-4d9c-9b7a-dd2248121353-run-httpd\") pod \"ceilometer-0\" (UID: \"8d021220-ab5c-4d9c-9b7a-dd2248121353\") " pod="openstack/ceilometer-0" Nov 22 10:57:43 crc kubenswrapper[4926]: I1122 10:57:43.243928 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/8d021220-ab5c-4d9c-9b7a-dd2248121353-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"8d021220-ab5c-4d9c-9b7a-dd2248121353\") " pod="openstack/ceilometer-0" Nov 22 10:57:43 crc kubenswrapper[4926]: I1122 10:57:43.243975 4926 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8d021220-ab5c-4d9c-9b7a-dd2248121353-scripts\") pod \"ceilometer-0\" (UID: \"8d021220-ab5c-4d9c-9b7a-dd2248121353\") " pod="openstack/ceilometer-0" Nov 22 10:57:43 crc kubenswrapper[4926]: I1122 10:57:43.244053 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/8d021220-ab5c-4d9c-9b7a-dd2248121353-log-httpd\") pod \"ceilometer-0\" (UID: \"8d021220-ab5c-4d9c-9b7a-dd2248121353\") " pod="openstack/ceilometer-0" Nov 22 10:57:43 crc kubenswrapper[4926]: I1122 10:57:43.244167 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8d021220-ab5c-4d9c-9b7a-dd2248121353-config-data\") pod \"ceilometer-0\" (UID: \"8d021220-ab5c-4d9c-9b7a-dd2248121353\") " pod="openstack/ceilometer-0" Nov 22 10:57:43 crc kubenswrapper[4926]: I1122 10:57:43.244283 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8d021220-ab5c-4d9c-9b7a-dd2248121353-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"8d021220-ab5c-4d9c-9b7a-dd2248121353\") " pod="openstack/ceilometer-0" Nov 22 10:57:43 crc kubenswrapper[4926]: I1122 10:57:43.308666 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-sync-bw5rr" Nov 22 10:57:43 crc kubenswrapper[4926]: I1122 10:57:43.346398 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/8d021220-ab5c-4d9c-9b7a-dd2248121353-log-httpd\") pod \"ceilometer-0\" (UID: \"8d021220-ab5c-4d9c-9b7a-dd2248121353\") " pod="openstack/ceilometer-0" Nov 22 10:57:43 crc kubenswrapper[4926]: I1122 10:57:43.346482 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8d021220-ab5c-4d9c-9b7a-dd2248121353-config-data\") pod \"ceilometer-0\" (UID: \"8d021220-ab5c-4d9c-9b7a-dd2248121353\") " pod="openstack/ceilometer-0" Nov 22 10:57:43 crc kubenswrapper[4926]: I1122 10:57:43.346516 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8d021220-ab5c-4d9c-9b7a-dd2248121353-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"8d021220-ab5c-4d9c-9b7a-dd2248121353\") " pod="openstack/ceilometer-0" Nov 22 10:57:43 crc kubenswrapper[4926]: I1122 10:57:43.346613 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-smk8w\" (UniqueName: \"kubernetes.io/projected/8d021220-ab5c-4d9c-9b7a-dd2248121353-kube-api-access-smk8w\") pod \"ceilometer-0\" (UID: \"8d021220-ab5c-4d9c-9b7a-dd2248121353\") " pod="openstack/ceilometer-0" Nov 22 10:57:43 crc kubenswrapper[4926]: I1122 10:57:43.346648 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/8d021220-ab5c-4d9c-9b7a-dd2248121353-run-httpd\") pod \"ceilometer-0\" (UID: \"8d021220-ab5c-4d9c-9b7a-dd2248121353\") " pod="openstack/ceilometer-0" Nov 22 10:57:43 crc kubenswrapper[4926]: I1122 10:57:43.346706 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: 
\"kubernetes.io/secret/8d021220-ab5c-4d9c-9b7a-dd2248121353-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"8d021220-ab5c-4d9c-9b7a-dd2248121353\") " pod="openstack/ceilometer-0" Nov 22 10:57:43 crc kubenswrapper[4926]: I1122 10:57:43.346737 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8d021220-ab5c-4d9c-9b7a-dd2248121353-scripts\") pod \"ceilometer-0\" (UID: \"8d021220-ab5c-4d9c-9b7a-dd2248121353\") " pod="openstack/ceilometer-0" Nov 22 10:57:43 crc kubenswrapper[4926]: I1122 10:57:43.347973 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/8d021220-ab5c-4d9c-9b7a-dd2248121353-log-httpd\") pod \"ceilometer-0\" (UID: \"8d021220-ab5c-4d9c-9b7a-dd2248121353\") " pod="openstack/ceilometer-0" Nov 22 10:57:43 crc kubenswrapper[4926]: I1122 10:57:43.348197 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/8d021220-ab5c-4d9c-9b7a-dd2248121353-run-httpd\") pod \"ceilometer-0\" (UID: \"8d021220-ab5c-4d9c-9b7a-dd2248121353\") " pod="openstack/ceilometer-0" Nov 22 10:57:43 crc kubenswrapper[4926]: I1122 10:57:43.351695 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8d021220-ab5c-4d9c-9b7a-dd2248121353-scripts\") pod \"ceilometer-0\" (UID: \"8d021220-ab5c-4d9c-9b7a-dd2248121353\") " pod="openstack/ceilometer-0" Nov 22 10:57:43 crc kubenswrapper[4926]: I1122 10:57:43.352585 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8d021220-ab5c-4d9c-9b7a-dd2248121353-config-data\") pod \"ceilometer-0\" (UID: \"8d021220-ab5c-4d9c-9b7a-dd2248121353\") " pod="openstack/ceilometer-0" Nov 22 10:57:43 crc kubenswrapper[4926]: I1122 10:57:43.352582 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/8d021220-ab5c-4d9c-9b7a-dd2248121353-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"8d021220-ab5c-4d9c-9b7a-dd2248121353\") " pod="openstack/ceilometer-0" Nov 22 10:57:43 crc kubenswrapper[4926]: I1122 10:57:43.354417 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8d021220-ab5c-4d9c-9b7a-dd2248121353-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"8d021220-ab5c-4d9c-9b7a-dd2248121353\") " pod="openstack/ceilometer-0" Nov 22 10:57:43 crc kubenswrapper[4926]: I1122 10:57:43.368560 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-smk8w\" (UniqueName: \"kubernetes.io/projected/8d021220-ab5c-4d9c-9b7a-dd2248121353-kube-api-access-smk8w\") pod \"ceilometer-0\" (UID: \"8d021220-ab5c-4d9c-9b7a-dd2248121353\") " pod="openstack/ceilometer-0" Nov 22 10:57:43 crc kubenswrapper[4926]: I1122 10:57:43.443305 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 22 10:57:43 crc kubenswrapper[4926]: I1122 10:57:43.447768 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/05bbddb1-c370-4805-9f91-373535d67f52-db-sync-config-data\") pod \"05bbddb1-c370-4805-9f91-373535d67f52\" (UID: \"05bbddb1-c370-4805-9f91-373535d67f52\") " Nov 22 10:57:43 crc kubenswrapper[4926]: I1122 10:57:43.447871 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/05bbddb1-c370-4805-9f91-373535d67f52-combined-ca-bundle\") pod \"05bbddb1-c370-4805-9f91-373535d67f52\" (UID: \"05bbddb1-c370-4805-9f91-373535d67f52\") " Nov 22 10:57:43 crc kubenswrapper[4926]: I1122 10:57:43.448382 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xmqvk\" (UniqueName: \"kubernetes.io/projected/05bbddb1-c370-4805-9f91-373535d67f52-kube-api-access-xmqvk\") pod \"05bbddb1-c370-4805-9f91-373535d67f52\" (UID: \"05bbddb1-c370-4805-9f91-373535d67f52\") " Nov 22 10:57:43 crc kubenswrapper[4926]: I1122 10:57:43.451148 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/05bbddb1-c370-4805-9f91-373535d67f52-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "05bbddb1-c370-4805-9f91-373535d67f52" (UID: "05bbddb1-c370-4805-9f91-373535d67f52"). InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:57:43 crc kubenswrapper[4926]: I1122 10:57:43.451589 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/05bbddb1-c370-4805-9f91-373535d67f52-kube-api-access-xmqvk" (OuterVolumeSpecName: "kube-api-access-xmqvk") pod "05bbddb1-c370-4805-9f91-373535d67f52" (UID: "05bbddb1-c370-4805-9f91-373535d67f52"). InnerVolumeSpecName "kube-api-access-xmqvk". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:57:43 crc kubenswrapper[4926]: I1122 10:57:43.470635 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/05bbddb1-c370-4805-9f91-373535d67f52-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "05bbddb1-c370-4805-9f91-373535d67f52" (UID: "05bbddb1-c370-4805-9f91-373535d67f52"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:57:43 crc kubenswrapper[4926]: I1122 10:57:43.551284 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xmqvk\" (UniqueName: \"kubernetes.io/projected/05bbddb1-c370-4805-9f91-373535d67f52-kube-api-access-xmqvk\") on node \"crc\" DevicePath \"\"" Nov 22 10:57:43 crc kubenswrapper[4926]: I1122 10:57:43.551518 4926 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/05bbddb1-c370-4805-9f91-373535d67f52-db-sync-config-data\") on node \"crc\" DevicePath \"\"" Nov 22 10:57:43 crc kubenswrapper[4926]: I1122 10:57:43.551529 4926 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/05bbddb1-c370-4805-9f91-373535d67f52-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 22 10:57:43 crc kubenswrapper[4926]: I1122 10:57:43.910451 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 22 10:57:43 crc kubenswrapper[4926]: W1122 10:57:43.922993 4926 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod8d021220_ab5c_4d9c_9b7a_dd2248121353.slice/crio-16f6ec5138e15730f597da51e53216f45abb5c1c78d9852df631d8dfd843e404 WatchSource:0}: Error finding container 16f6ec5138e15730f597da51e53216f45abb5c1c78d9852df631d8dfd843e404: Status 404 returned error can't find the container with id 16f6ec5138e15730f597da51e53216f45abb5c1c78d9852df631d8dfd843e404 Nov 22 10:57:44 crc kubenswrapper[4926]: I1122 10:57:44.055304 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"8d021220-ab5c-4d9c-9b7a-dd2248121353","Type":"ContainerStarted","Data":"16f6ec5138e15730f597da51e53216f45abb5c1c78d9852df631d8dfd843e404"} Nov 22 10:57:44 crc kubenswrapper[4926]: I1122 10:57:44.056711 4926 generic.go:334] "Generic (PLEG): container finished" podID="63e63df0-e7ff-46a2-9b1d-60be115851ce" containerID="41e0bea0065c6eacfb4160378ac61f9c77f893c6bc4fcb6785fb1a768f7faa4d" exitCode=0 Nov 22 10:57:44 crc kubenswrapper[4926]: I1122 10:57:44.056787 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-fh2gk" event={"ID":"63e63df0-e7ff-46a2-9b1d-60be115851ce","Type":"ContainerDied","Data":"41e0bea0065c6eacfb4160378ac61f9c77f893c6bc4fcb6785fb1a768f7faa4d"} Nov 22 10:57:44 crc kubenswrapper[4926]: I1122 10:57:44.058678 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-bw5rr" event={"ID":"05bbddb1-c370-4805-9f91-373535d67f52","Type":"ContainerDied","Data":"09c2dd4fdcf62e99eaa0d6045b0a12d3588636064ff152067280e5f19bf9c7ae"} Nov 22 10:57:44 crc kubenswrapper[4926]: I1122 10:57:44.058704 4926 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="09c2dd4fdcf62e99eaa0d6045b0a12d3588636064ff152067280e5f19bf9c7ae" Nov 22 10:57:44 crc kubenswrapper[4926]: I1122 10:57:44.058841 4926 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-db-sync-bw5rr" Nov 22 10:57:44 crc kubenswrapper[4926]: I1122 10:57:44.615786 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9ebf564c-b5b8-40b7-8899-fc953a485d4d" path="/var/lib/kubelet/pods/9ebf564c-b5b8-40b7-8899-fc953a485d4d/volumes" Nov 22 10:57:44 crc kubenswrapper[4926]: I1122 10:57:44.616763 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-keystone-listener-5b6fb59ff8-cgr6h"] Nov 22 10:57:44 crc kubenswrapper[4926]: E1122 10:57:44.617061 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="05bbddb1-c370-4805-9f91-373535d67f52" containerName="barbican-db-sync" Nov 22 10:57:44 crc kubenswrapper[4926]: I1122 10:57:44.617077 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="05bbddb1-c370-4805-9f91-373535d67f52" containerName="barbican-db-sync" Nov 22 10:57:44 crc kubenswrapper[4926]: I1122 10:57:44.617252 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="05bbddb1-c370-4805-9f91-373535d67f52" containerName="barbican-db-sync" Nov 22 10:57:44 crc kubenswrapper[4926]: I1122 10:57:44.625040 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-keystone-listener-5b6fb59ff8-cgr6h" Nov 22 10:57:44 crc kubenswrapper[4926]: I1122 10:57:44.657740 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-keystone-listener-config-data" Nov 22 10:57:44 crc kubenswrapper[4926]: I1122 10:57:44.658037 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-barbican-dockercfg-4ggkk" Nov 22 10:57:44 crc kubenswrapper[4926]: I1122 10:57:44.658526 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-config-data" Nov 22 10:57:44 crc kubenswrapper[4926]: I1122 10:57:44.659785 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-keystone-listener-5b6fb59ff8-cgr6h"] Nov 22 10:57:44 crc kubenswrapper[4926]: I1122 10:57:44.710731 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-worker-6c6596fd55-5fshh"] Nov 22 10:57:44 crc kubenswrapper[4926]: I1122 10:57:44.729372 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-worker-6c6596fd55-5fshh" Nov 22 10:57:44 crc kubenswrapper[4926]: I1122 10:57:44.740270 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-worker-config-data" Nov 22 10:57:44 crc kubenswrapper[4926]: I1122 10:57:44.749047 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-worker-6c6596fd55-5fshh"] Nov 22 10:57:44 crc kubenswrapper[4926]: I1122 10:57:44.789287 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f257r\" (UniqueName: \"kubernetes.io/projected/5659fc64-a862-4d05-989e-4e667a4bb792-kube-api-access-f257r\") pod \"barbican-keystone-listener-5b6fb59ff8-cgr6h\" (UID: \"5659fc64-a862-4d05-989e-4e667a4bb792\") " pod="openstack/barbican-keystone-listener-5b6fb59ff8-cgr6h" Nov 22 10:57:44 crc kubenswrapper[4926]: I1122 10:57:44.789366 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5659fc64-a862-4d05-989e-4e667a4bb792-logs\") pod \"barbican-keystone-listener-5b6fb59ff8-cgr6h\" (UID: \"5659fc64-a862-4d05-989e-4e667a4bb792\") " pod="openstack/barbican-keystone-listener-5b6fb59ff8-cgr6h" Nov 22 10:57:44 crc kubenswrapper[4926]: I1122 10:57:44.789427 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/5659fc64-a862-4d05-989e-4e667a4bb792-config-data-custom\") pod \"barbican-keystone-listener-5b6fb59ff8-cgr6h\" (UID: \"5659fc64-a862-4d05-989e-4e667a4bb792\") " pod="openstack/barbican-keystone-listener-5b6fb59ff8-cgr6h" Nov 22 10:57:44 crc kubenswrapper[4926]: I1122 10:57:44.789453 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5659fc64-a862-4d05-989e-4e667a4bb792-combined-ca-bundle\") pod \"barbican-keystone-listener-5b6fb59ff8-cgr6h\" (UID: \"5659fc64-a862-4d05-989e-4e667a4bb792\") " pod="openstack/barbican-keystone-listener-5b6fb59ff8-cgr6h" Nov 22 10:57:44 crc kubenswrapper[4926]: I1122 10:57:44.789479 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5659fc64-a862-4d05-989e-4e667a4bb792-config-data\") pod \"barbican-keystone-listener-5b6fb59ff8-cgr6h\" (UID: \"5659fc64-a862-4d05-989e-4e667a4bb792\") " pod="openstack/barbican-keystone-listener-5b6fb59ff8-cgr6h" Nov 22 10:57:44 crc kubenswrapper[4926]: I1122 10:57:44.893951 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-688c87cc99-l2vl8"] Nov 22 10:57:44 crc kubenswrapper[4926]: I1122 10:57:44.898711 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-688c87cc99-l2vl8" Nov 22 10:57:44 crc kubenswrapper[4926]: I1122 10:57:44.901994 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/f3d2dfb5-d3bc-4adc-92c8-24104edfa164-ovsdbserver-sb\") pod \"dnsmasq-dns-688c87cc99-l2vl8\" (UID: \"f3d2dfb5-d3bc-4adc-92c8-24104edfa164\") " pod="openstack/dnsmasq-dns-688c87cc99-l2vl8" Nov 22 10:57:44 crc kubenswrapper[4926]: I1122 10:57:44.902055 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-f257r\" (UniqueName: \"kubernetes.io/projected/5659fc64-a862-4d05-989e-4e667a4bb792-kube-api-access-f257r\") pod \"barbican-keystone-listener-5b6fb59ff8-cgr6h\" (UID: \"5659fc64-a862-4d05-989e-4e667a4bb792\") " pod="openstack/barbican-keystone-listener-5b6fb59ff8-cgr6h" Nov 22 10:57:44 crc kubenswrapper[4926]: I1122 10:57:44.902112 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5659fc64-a862-4d05-989e-4e667a4bb792-logs\") pod \"barbican-keystone-listener-5b6fb59ff8-cgr6h\" (UID: \"5659fc64-a862-4d05-989e-4e667a4bb792\") " pod="openstack/barbican-keystone-listener-5b6fb59ff8-cgr6h" Nov 22 10:57:44 crc kubenswrapper[4926]: I1122 10:57:44.902140 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hw95x\" (UniqueName: \"kubernetes.io/projected/58c72eaf-f8f2-4333-8057-a9237457d73c-kube-api-access-hw95x\") pod \"barbican-worker-6c6596fd55-5fshh\" (UID: \"58c72eaf-f8f2-4333-8057-a9237457d73c\") " pod="openstack/barbican-worker-6c6596fd55-5fshh" Nov 22 10:57:44 crc kubenswrapper[4926]: I1122 10:57:44.902179 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/58c72eaf-f8f2-4333-8057-a9237457d73c-config-data\") pod \"barbican-worker-6c6596fd55-5fshh\" (UID: \"58c72eaf-f8f2-4333-8057-a9237457d73c\") " pod="openstack/barbican-worker-6c6596fd55-5fshh" Nov 22 10:57:44 crc kubenswrapper[4926]: I1122 10:57:44.902206 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/58c72eaf-f8f2-4333-8057-a9237457d73c-combined-ca-bundle\") pod \"barbican-worker-6c6596fd55-5fshh\" (UID: \"58c72eaf-f8f2-4333-8057-a9237457d73c\") " pod="openstack/barbican-worker-6c6596fd55-5fshh" Nov 22 10:57:44 crc kubenswrapper[4926]: I1122 10:57:44.902231 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/f3d2dfb5-d3bc-4adc-92c8-24104edfa164-dns-svc\") pod \"dnsmasq-dns-688c87cc99-l2vl8\" (UID: \"f3d2dfb5-d3bc-4adc-92c8-24104edfa164\") " pod="openstack/dnsmasq-dns-688c87cc99-l2vl8" Nov 22 10:57:44 crc kubenswrapper[4926]: I1122 10:57:44.902259 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/58c72eaf-f8f2-4333-8057-a9237457d73c-logs\") pod \"barbican-worker-6c6596fd55-5fshh\" (UID: \"58c72eaf-f8f2-4333-8057-a9237457d73c\") " pod="openstack/barbican-worker-6c6596fd55-5fshh" Nov 22 10:57:44 crc kubenswrapper[4926]: I1122 10:57:44.902281 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tkq95\" 
(UniqueName: \"kubernetes.io/projected/f3d2dfb5-d3bc-4adc-92c8-24104edfa164-kube-api-access-tkq95\") pod \"dnsmasq-dns-688c87cc99-l2vl8\" (UID: \"f3d2dfb5-d3bc-4adc-92c8-24104edfa164\") " pod="openstack/dnsmasq-dns-688c87cc99-l2vl8" Nov 22 10:57:44 crc kubenswrapper[4926]: I1122 10:57:44.902314 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/5659fc64-a862-4d05-989e-4e667a4bb792-config-data-custom\") pod \"barbican-keystone-listener-5b6fb59ff8-cgr6h\" (UID: \"5659fc64-a862-4d05-989e-4e667a4bb792\") " pod="openstack/barbican-keystone-listener-5b6fb59ff8-cgr6h" Nov 22 10:57:44 crc kubenswrapper[4926]: I1122 10:57:44.902340 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5659fc64-a862-4d05-989e-4e667a4bb792-combined-ca-bundle\") pod \"barbican-keystone-listener-5b6fb59ff8-cgr6h\" (UID: \"5659fc64-a862-4d05-989e-4e667a4bb792\") " pod="openstack/barbican-keystone-listener-5b6fb59ff8-cgr6h" Nov 22 10:57:44 crc kubenswrapper[4926]: I1122 10:57:44.902433 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f3d2dfb5-d3bc-4adc-92c8-24104edfa164-config\") pod \"dnsmasq-dns-688c87cc99-l2vl8\" (UID: \"f3d2dfb5-d3bc-4adc-92c8-24104edfa164\") " pod="openstack/dnsmasq-dns-688c87cc99-l2vl8" Nov 22 10:57:44 crc kubenswrapper[4926]: I1122 10:57:44.902480 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5659fc64-a862-4d05-989e-4e667a4bb792-config-data\") pod \"barbican-keystone-listener-5b6fb59ff8-cgr6h\" (UID: \"5659fc64-a862-4d05-989e-4e667a4bb792\") " pod="openstack/barbican-keystone-listener-5b6fb59ff8-cgr6h" Nov 22 10:57:44 crc kubenswrapper[4926]: I1122 10:57:44.902501 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/f3d2dfb5-d3bc-4adc-92c8-24104edfa164-ovsdbserver-nb\") pod \"dnsmasq-dns-688c87cc99-l2vl8\" (UID: \"f3d2dfb5-d3bc-4adc-92c8-24104edfa164\") " pod="openstack/dnsmasq-dns-688c87cc99-l2vl8" Nov 22 10:57:44 crc kubenswrapper[4926]: I1122 10:57:44.902542 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/f3d2dfb5-d3bc-4adc-92c8-24104edfa164-dns-swift-storage-0\") pod \"dnsmasq-dns-688c87cc99-l2vl8\" (UID: \"f3d2dfb5-d3bc-4adc-92c8-24104edfa164\") " pod="openstack/dnsmasq-dns-688c87cc99-l2vl8" Nov 22 10:57:44 crc kubenswrapper[4926]: I1122 10:57:44.902624 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/58c72eaf-f8f2-4333-8057-a9237457d73c-config-data-custom\") pod \"barbican-worker-6c6596fd55-5fshh\" (UID: \"58c72eaf-f8f2-4333-8057-a9237457d73c\") " pod="openstack/barbican-worker-6c6596fd55-5fshh" Nov 22 10:57:44 crc kubenswrapper[4926]: I1122 10:57:44.903504 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5659fc64-a862-4d05-989e-4e667a4bb792-logs\") pod \"barbican-keystone-listener-5b6fb59ff8-cgr6h\" (UID: \"5659fc64-a862-4d05-989e-4e667a4bb792\") " pod="openstack/barbican-keystone-listener-5b6fb59ff8-cgr6h" Nov 22 10:57:44 crc 
kubenswrapper[4926]: I1122 10:57:44.915180 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/5659fc64-a862-4d05-989e-4e667a4bb792-config-data-custom\") pod \"barbican-keystone-listener-5b6fb59ff8-cgr6h\" (UID: \"5659fc64-a862-4d05-989e-4e667a4bb792\") " pod="openstack/barbican-keystone-listener-5b6fb59ff8-cgr6h" Nov 22 10:57:44 crc kubenswrapper[4926]: I1122 10:57:44.921088 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5659fc64-a862-4d05-989e-4e667a4bb792-combined-ca-bundle\") pod \"barbican-keystone-listener-5b6fb59ff8-cgr6h\" (UID: \"5659fc64-a862-4d05-989e-4e667a4bb792\") " pod="openstack/barbican-keystone-listener-5b6fb59ff8-cgr6h" Nov 22 10:57:44 crc kubenswrapper[4926]: I1122 10:57:44.921491 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5659fc64-a862-4d05-989e-4e667a4bb792-config-data\") pod \"barbican-keystone-listener-5b6fb59ff8-cgr6h\" (UID: \"5659fc64-a862-4d05-989e-4e667a4bb792\") " pod="openstack/barbican-keystone-listener-5b6fb59ff8-cgr6h" Nov 22 10:57:44 crc kubenswrapper[4926]: I1122 10:57:44.949565 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-688c87cc99-l2vl8"] Nov 22 10:57:44 crc kubenswrapper[4926]: I1122 10:57:44.951314 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-f257r\" (UniqueName: \"kubernetes.io/projected/5659fc64-a862-4d05-989e-4e667a4bb792-kube-api-access-f257r\") pod \"barbican-keystone-listener-5b6fb59ff8-cgr6h\" (UID: \"5659fc64-a862-4d05-989e-4e667a4bb792\") " pod="openstack/barbican-keystone-listener-5b6fb59ff8-cgr6h" Nov 22 10:57:44 crc kubenswrapper[4926]: I1122 10:57:44.955427 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-keystone-listener-5b6fb59ff8-cgr6h" Nov 22 10:57:45 crc kubenswrapper[4926]: I1122 10:57:45.004623 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-api-867d4cfd66-ftqtw"] Nov 22 10:57:45 crc kubenswrapper[4926]: I1122 10:57:45.004940 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/f3d2dfb5-d3bc-4adc-92c8-24104edfa164-ovsdbserver-sb\") pod \"dnsmasq-dns-688c87cc99-l2vl8\" (UID: \"f3d2dfb5-d3bc-4adc-92c8-24104edfa164\") " pod="openstack/dnsmasq-dns-688c87cc99-l2vl8" Nov 22 10:57:45 crc kubenswrapper[4926]: I1122 10:57:45.005015 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hw95x\" (UniqueName: \"kubernetes.io/projected/58c72eaf-f8f2-4333-8057-a9237457d73c-kube-api-access-hw95x\") pod \"barbican-worker-6c6596fd55-5fshh\" (UID: \"58c72eaf-f8f2-4333-8057-a9237457d73c\") " pod="openstack/barbican-worker-6c6596fd55-5fshh" Nov 22 10:57:45 crc kubenswrapper[4926]: I1122 10:57:45.005053 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/58c72eaf-f8f2-4333-8057-a9237457d73c-config-data\") pod \"barbican-worker-6c6596fd55-5fshh\" (UID: \"58c72eaf-f8f2-4333-8057-a9237457d73c\") " pod="openstack/barbican-worker-6c6596fd55-5fshh" Nov 22 10:57:45 crc kubenswrapper[4926]: I1122 10:57:45.005074 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/58c72eaf-f8f2-4333-8057-a9237457d73c-combined-ca-bundle\") pod \"barbican-worker-6c6596fd55-5fshh\" (UID: \"58c72eaf-f8f2-4333-8057-a9237457d73c\") " pod="openstack/barbican-worker-6c6596fd55-5fshh" Nov 22 10:57:45 crc kubenswrapper[4926]: I1122 10:57:45.005097 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/f3d2dfb5-d3bc-4adc-92c8-24104edfa164-dns-svc\") pod \"dnsmasq-dns-688c87cc99-l2vl8\" (UID: \"f3d2dfb5-d3bc-4adc-92c8-24104edfa164\") " pod="openstack/dnsmasq-dns-688c87cc99-l2vl8" Nov 22 10:57:45 crc kubenswrapper[4926]: I1122 10:57:45.005120 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/58c72eaf-f8f2-4333-8057-a9237457d73c-logs\") pod \"barbican-worker-6c6596fd55-5fshh\" (UID: \"58c72eaf-f8f2-4333-8057-a9237457d73c\") " pod="openstack/barbican-worker-6c6596fd55-5fshh" Nov 22 10:57:45 crc kubenswrapper[4926]: I1122 10:57:45.005140 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tkq95\" (UniqueName: \"kubernetes.io/projected/f3d2dfb5-d3bc-4adc-92c8-24104edfa164-kube-api-access-tkq95\") pod \"dnsmasq-dns-688c87cc99-l2vl8\" (UID: \"f3d2dfb5-d3bc-4adc-92c8-24104edfa164\") " pod="openstack/dnsmasq-dns-688c87cc99-l2vl8" Nov 22 10:57:45 crc kubenswrapper[4926]: I1122 10:57:45.005168 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f3d2dfb5-d3bc-4adc-92c8-24104edfa164-config\") pod \"dnsmasq-dns-688c87cc99-l2vl8\" (UID: \"f3d2dfb5-d3bc-4adc-92c8-24104edfa164\") " pod="openstack/dnsmasq-dns-688c87cc99-l2vl8" Nov 22 10:57:45 crc kubenswrapper[4926]: I1122 10:57:45.005191 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: 
\"kubernetes.io/configmap/f3d2dfb5-d3bc-4adc-92c8-24104edfa164-ovsdbserver-nb\") pod \"dnsmasq-dns-688c87cc99-l2vl8\" (UID: \"f3d2dfb5-d3bc-4adc-92c8-24104edfa164\") " pod="openstack/dnsmasq-dns-688c87cc99-l2vl8" Nov 22 10:57:45 crc kubenswrapper[4926]: I1122 10:57:45.005220 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/f3d2dfb5-d3bc-4adc-92c8-24104edfa164-dns-swift-storage-0\") pod \"dnsmasq-dns-688c87cc99-l2vl8\" (UID: \"f3d2dfb5-d3bc-4adc-92c8-24104edfa164\") " pod="openstack/dnsmasq-dns-688c87cc99-l2vl8" Nov 22 10:57:45 crc kubenswrapper[4926]: I1122 10:57:45.005272 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/58c72eaf-f8f2-4333-8057-a9237457d73c-config-data-custom\") pod \"barbican-worker-6c6596fd55-5fshh\" (UID: \"58c72eaf-f8f2-4333-8057-a9237457d73c\") " pod="openstack/barbican-worker-6c6596fd55-5fshh" Nov 22 10:57:45 crc kubenswrapper[4926]: I1122 10:57:45.006143 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/f3d2dfb5-d3bc-4adc-92c8-24104edfa164-ovsdbserver-sb\") pod \"dnsmasq-dns-688c87cc99-l2vl8\" (UID: \"f3d2dfb5-d3bc-4adc-92c8-24104edfa164\") " pod="openstack/dnsmasq-dns-688c87cc99-l2vl8" Nov 22 10:57:45 crc kubenswrapper[4926]: I1122 10:57:45.007029 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f3d2dfb5-d3bc-4adc-92c8-24104edfa164-config\") pod \"dnsmasq-dns-688c87cc99-l2vl8\" (UID: \"f3d2dfb5-d3bc-4adc-92c8-24104edfa164\") " pod="openstack/dnsmasq-dns-688c87cc99-l2vl8" Nov 22 10:57:45 crc kubenswrapper[4926]: I1122 10:57:45.007549 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/f3d2dfb5-d3bc-4adc-92c8-24104edfa164-ovsdbserver-nb\") pod \"dnsmasq-dns-688c87cc99-l2vl8\" (UID: \"f3d2dfb5-d3bc-4adc-92c8-24104edfa164\") " pod="openstack/dnsmasq-dns-688c87cc99-l2vl8" Nov 22 10:57:45 crc kubenswrapper[4926]: I1122 10:57:45.007858 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/f3d2dfb5-d3bc-4adc-92c8-24104edfa164-dns-swift-storage-0\") pod \"dnsmasq-dns-688c87cc99-l2vl8\" (UID: \"f3d2dfb5-d3bc-4adc-92c8-24104edfa164\") " pod="openstack/dnsmasq-dns-688c87cc99-l2vl8" Nov 22 10:57:45 crc kubenswrapper[4926]: I1122 10:57:45.007966 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/58c72eaf-f8f2-4333-8057-a9237457d73c-logs\") pod \"barbican-worker-6c6596fd55-5fshh\" (UID: \"58c72eaf-f8f2-4333-8057-a9237457d73c\") " pod="openstack/barbican-worker-6c6596fd55-5fshh" Nov 22 10:57:45 crc kubenswrapper[4926]: I1122 10:57:45.008485 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/f3d2dfb5-d3bc-4adc-92c8-24104edfa164-dns-svc\") pod \"dnsmasq-dns-688c87cc99-l2vl8\" (UID: \"f3d2dfb5-d3bc-4adc-92c8-24104edfa164\") " pod="openstack/dnsmasq-dns-688c87cc99-l2vl8" Nov 22 10:57:45 crc kubenswrapper[4926]: I1122 10:57:45.011515 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-api-867d4cfd66-ftqtw" Nov 22 10:57:45 crc kubenswrapper[4926]: I1122 10:57:45.016091 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-api-config-data" Nov 22 10:57:45 crc kubenswrapper[4926]: I1122 10:57:45.023709 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-867d4cfd66-ftqtw"] Nov 22 10:57:45 crc kubenswrapper[4926]: I1122 10:57:45.026654 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/58c72eaf-f8f2-4333-8057-a9237457d73c-config-data-custom\") pod \"barbican-worker-6c6596fd55-5fshh\" (UID: \"58c72eaf-f8f2-4333-8057-a9237457d73c\") " pod="openstack/barbican-worker-6c6596fd55-5fshh" Nov 22 10:57:45 crc kubenswrapper[4926]: I1122 10:57:45.026927 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/58c72eaf-f8f2-4333-8057-a9237457d73c-combined-ca-bundle\") pod \"barbican-worker-6c6596fd55-5fshh\" (UID: \"58c72eaf-f8f2-4333-8057-a9237457d73c\") " pod="openstack/barbican-worker-6c6596fd55-5fshh" Nov 22 10:57:45 crc kubenswrapper[4926]: I1122 10:57:45.030121 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hw95x\" (UniqueName: \"kubernetes.io/projected/58c72eaf-f8f2-4333-8057-a9237457d73c-kube-api-access-hw95x\") pod \"barbican-worker-6c6596fd55-5fshh\" (UID: \"58c72eaf-f8f2-4333-8057-a9237457d73c\") " pod="openstack/barbican-worker-6c6596fd55-5fshh" Nov 22 10:57:45 crc kubenswrapper[4926]: I1122 10:57:45.032646 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/58c72eaf-f8f2-4333-8057-a9237457d73c-config-data\") pod \"barbican-worker-6c6596fd55-5fshh\" (UID: \"58c72eaf-f8f2-4333-8057-a9237457d73c\") " pod="openstack/barbican-worker-6c6596fd55-5fshh" Nov 22 10:57:45 crc kubenswrapper[4926]: I1122 10:57:45.046046 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tkq95\" (UniqueName: \"kubernetes.io/projected/f3d2dfb5-d3bc-4adc-92c8-24104edfa164-kube-api-access-tkq95\") pod \"dnsmasq-dns-688c87cc99-l2vl8\" (UID: \"f3d2dfb5-d3bc-4adc-92c8-24104edfa164\") " pod="openstack/dnsmasq-dns-688c87cc99-l2vl8" Nov 22 10:57:45 crc kubenswrapper[4926]: I1122 10:57:45.054044 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-worker-6c6596fd55-5fshh" Nov 22 10:57:45 crc kubenswrapper[4926]: I1122 10:57:45.106722 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b85c12f2-c589-476e-94b3-cc7650c154eb-combined-ca-bundle\") pod \"barbican-api-867d4cfd66-ftqtw\" (UID: \"b85c12f2-c589-476e-94b3-cc7650c154eb\") " pod="openstack/barbican-api-867d4cfd66-ftqtw" Nov 22 10:57:45 crc kubenswrapper[4926]: I1122 10:57:45.106796 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-x52rt\" (UniqueName: \"kubernetes.io/projected/b85c12f2-c589-476e-94b3-cc7650c154eb-kube-api-access-x52rt\") pod \"barbican-api-867d4cfd66-ftqtw\" (UID: \"b85c12f2-c589-476e-94b3-cc7650c154eb\") " pod="openstack/barbican-api-867d4cfd66-ftqtw" Nov 22 10:57:45 crc kubenswrapper[4926]: I1122 10:57:45.106829 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b85c12f2-c589-476e-94b3-cc7650c154eb-logs\") pod \"barbican-api-867d4cfd66-ftqtw\" (UID: \"b85c12f2-c589-476e-94b3-cc7650c154eb\") " pod="openstack/barbican-api-867d4cfd66-ftqtw" Nov 22 10:57:45 crc kubenswrapper[4926]: I1122 10:57:45.106898 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/b85c12f2-c589-476e-94b3-cc7650c154eb-config-data-custom\") pod \"barbican-api-867d4cfd66-ftqtw\" (UID: \"b85c12f2-c589-476e-94b3-cc7650c154eb\") " pod="openstack/barbican-api-867d4cfd66-ftqtw" Nov 22 10:57:45 crc kubenswrapper[4926]: I1122 10:57:45.106963 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b85c12f2-c589-476e-94b3-cc7650c154eb-config-data\") pod \"barbican-api-867d4cfd66-ftqtw\" (UID: \"b85c12f2-c589-476e-94b3-cc7650c154eb\") " pod="openstack/barbican-api-867d4cfd66-ftqtw" Nov 22 10:57:45 crc kubenswrapper[4926]: I1122 10:57:45.118318 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"8d021220-ab5c-4d9c-9b7a-dd2248121353","Type":"ContainerStarted","Data":"e057c2c43791991e9165d6cf61d6acbfd66ba63c5966a16d84e7c0c781bef430"} Nov 22 10:57:45 crc kubenswrapper[4926]: I1122 10:57:45.184750 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-688c87cc99-l2vl8" Nov 22 10:57:45 crc kubenswrapper[4926]: I1122 10:57:45.208751 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b85c12f2-c589-476e-94b3-cc7650c154eb-config-data\") pod \"barbican-api-867d4cfd66-ftqtw\" (UID: \"b85c12f2-c589-476e-94b3-cc7650c154eb\") " pod="openstack/barbican-api-867d4cfd66-ftqtw" Nov 22 10:57:45 crc kubenswrapper[4926]: I1122 10:57:45.208849 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b85c12f2-c589-476e-94b3-cc7650c154eb-combined-ca-bundle\") pod \"barbican-api-867d4cfd66-ftqtw\" (UID: \"b85c12f2-c589-476e-94b3-cc7650c154eb\") " pod="openstack/barbican-api-867d4cfd66-ftqtw" Nov 22 10:57:45 crc kubenswrapper[4926]: I1122 10:57:45.209030 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-x52rt\" (UniqueName: \"kubernetes.io/projected/b85c12f2-c589-476e-94b3-cc7650c154eb-kube-api-access-x52rt\") pod \"barbican-api-867d4cfd66-ftqtw\" (UID: \"b85c12f2-c589-476e-94b3-cc7650c154eb\") " pod="openstack/barbican-api-867d4cfd66-ftqtw" Nov 22 10:57:45 crc kubenswrapper[4926]: I1122 10:57:45.209047 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b85c12f2-c589-476e-94b3-cc7650c154eb-logs\") pod \"barbican-api-867d4cfd66-ftqtw\" (UID: \"b85c12f2-c589-476e-94b3-cc7650c154eb\") " pod="openstack/barbican-api-867d4cfd66-ftqtw" Nov 22 10:57:45 crc kubenswrapper[4926]: I1122 10:57:45.209095 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/b85c12f2-c589-476e-94b3-cc7650c154eb-config-data-custom\") pod \"barbican-api-867d4cfd66-ftqtw\" (UID: \"b85c12f2-c589-476e-94b3-cc7650c154eb\") " pod="openstack/barbican-api-867d4cfd66-ftqtw" Nov 22 10:57:45 crc kubenswrapper[4926]: I1122 10:57:45.209942 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b85c12f2-c589-476e-94b3-cc7650c154eb-logs\") pod \"barbican-api-867d4cfd66-ftqtw\" (UID: \"b85c12f2-c589-476e-94b3-cc7650c154eb\") " pod="openstack/barbican-api-867d4cfd66-ftqtw" Nov 22 10:57:45 crc kubenswrapper[4926]: I1122 10:57:45.212989 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/b85c12f2-c589-476e-94b3-cc7650c154eb-config-data-custom\") pod \"barbican-api-867d4cfd66-ftqtw\" (UID: \"b85c12f2-c589-476e-94b3-cc7650c154eb\") " pod="openstack/barbican-api-867d4cfd66-ftqtw" Nov 22 10:57:45 crc kubenswrapper[4926]: I1122 10:57:45.219027 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b85c12f2-c589-476e-94b3-cc7650c154eb-config-data\") pod \"barbican-api-867d4cfd66-ftqtw\" (UID: \"b85c12f2-c589-476e-94b3-cc7650c154eb\") " pod="openstack/barbican-api-867d4cfd66-ftqtw" Nov 22 10:57:45 crc kubenswrapper[4926]: I1122 10:57:45.229622 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-x52rt\" (UniqueName: \"kubernetes.io/projected/b85c12f2-c589-476e-94b3-cc7650c154eb-kube-api-access-x52rt\") pod \"barbican-api-867d4cfd66-ftqtw\" (UID: \"b85c12f2-c589-476e-94b3-cc7650c154eb\") " pod="openstack/barbican-api-867d4cfd66-ftqtw" Nov 22 
10:57:45 crc kubenswrapper[4926]: I1122 10:57:45.247873 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b85c12f2-c589-476e-94b3-cc7650c154eb-combined-ca-bundle\") pod \"barbican-api-867d4cfd66-ftqtw\" (UID: \"b85c12f2-c589-476e-94b3-cc7650c154eb\") " pod="openstack/barbican-api-867d4cfd66-ftqtw" Nov 22 10:57:45 crc kubenswrapper[4926]: I1122 10:57:45.425425 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-sync-fh2gk" Nov 22 10:57:45 crc kubenswrapper[4926]: I1122 10:57:45.503093 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-867d4cfd66-ftqtw" Nov 22 10:57:45 crc kubenswrapper[4926]: I1122 10:57:45.619789 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/63e63df0-e7ff-46a2-9b1d-60be115851ce-scripts\") pod \"63e63df0-e7ff-46a2-9b1d-60be115851ce\" (UID: \"63e63df0-e7ff-46a2-9b1d-60be115851ce\") " Nov 22 10:57:45 crc kubenswrapper[4926]: I1122 10:57:45.619861 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/63e63df0-e7ff-46a2-9b1d-60be115851ce-combined-ca-bundle\") pod \"63e63df0-e7ff-46a2-9b1d-60be115851ce\" (UID: \"63e63df0-e7ff-46a2-9b1d-60be115851ce\") " Nov 22 10:57:45 crc kubenswrapper[4926]: I1122 10:57:45.619911 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/63e63df0-e7ff-46a2-9b1d-60be115851ce-config-data\") pod \"63e63df0-e7ff-46a2-9b1d-60be115851ce\" (UID: \"63e63df0-e7ff-46a2-9b1d-60be115851ce\") " Nov 22 10:57:45 crc kubenswrapper[4926]: I1122 10:57:45.619954 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x668t\" (UniqueName: \"kubernetes.io/projected/63e63df0-e7ff-46a2-9b1d-60be115851ce-kube-api-access-x668t\") pod \"63e63df0-e7ff-46a2-9b1d-60be115851ce\" (UID: \"63e63df0-e7ff-46a2-9b1d-60be115851ce\") " Nov 22 10:57:45 crc kubenswrapper[4926]: I1122 10:57:45.620248 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/63e63df0-e7ff-46a2-9b1d-60be115851ce-etc-machine-id\") pod \"63e63df0-e7ff-46a2-9b1d-60be115851ce\" (UID: \"63e63df0-e7ff-46a2-9b1d-60be115851ce\") " Nov 22 10:57:45 crc kubenswrapper[4926]: I1122 10:57:45.620274 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/63e63df0-e7ff-46a2-9b1d-60be115851ce-db-sync-config-data\") pod \"63e63df0-e7ff-46a2-9b1d-60be115851ce\" (UID: \"63e63df0-e7ff-46a2-9b1d-60be115851ce\") " Nov 22 10:57:45 crc kubenswrapper[4926]: I1122 10:57:45.623976 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/63e63df0-e7ff-46a2-9b1d-60be115851ce-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "63e63df0-e7ff-46a2-9b1d-60be115851ce" (UID: "63e63df0-e7ff-46a2-9b1d-60be115851ce"). InnerVolumeSpecName "etc-machine-id". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 22 10:57:45 crc kubenswrapper[4926]: I1122 10:57:45.630238 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/63e63df0-e7ff-46a2-9b1d-60be115851ce-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "63e63df0-e7ff-46a2-9b1d-60be115851ce" (UID: "63e63df0-e7ff-46a2-9b1d-60be115851ce"). InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:57:45 crc kubenswrapper[4926]: I1122 10:57:45.630678 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/63e63df0-e7ff-46a2-9b1d-60be115851ce-scripts" (OuterVolumeSpecName: "scripts") pod "63e63df0-e7ff-46a2-9b1d-60be115851ce" (UID: "63e63df0-e7ff-46a2-9b1d-60be115851ce"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:57:45 crc kubenswrapper[4926]: I1122 10:57:45.633593 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/63e63df0-e7ff-46a2-9b1d-60be115851ce-kube-api-access-x668t" (OuterVolumeSpecName: "kube-api-access-x668t") pod "63e63df0-e7ff-46a2-9b1d-60be115851ce" (UID: "63e63df0-e7ff-46a2-9b1d-60be115851ce"). InnerVolumeSpecName "kube-api-access-x668t". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:57:45 crc kubenswrapper[4926]: I1122 10:57:45.657648 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-worker-6c6596fd55-5fshh"] Nov 22 10:57:45 crc kubenswrapper[4926]: I1122 10:57:45.671789 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-keystone-listener-5b6fb59ff8-cgr6h"] Nov 22 10:57:45 crc kubenswrapper[4926]: I1122 10:57:45.702987 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/63e63df0-e7ff-46a2-9b1d-60be115851ce-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "63e63df0-e7ff-46a2-9b1d-60be115851ce" (UID: "63e63df0-e7ff-46a2-9b1d-60be115851ce"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:57:45 crc kubenswrapper[4926]: I1122 10:57:45.708564 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/63e63df0-e7ff-46a2-9b1d-60be115851ce-config-data" (OuterVolumeSpecName: "config-data") pod "63e63df0-e7ff-46a2-9b1d-60be115851ce" (UID: "63e63df0-e7ff-46a2-9b1d-60be115851ce"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:57:45 crc kubenswrapper[4926]: I1122 10:57:45.724246 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x668t\" (UniqueName: \"kubernetes.io/projected/63e63df0-e7ff-46a2-9b1d-60be115851ce-kube-api-access-x668t\") on node \"crc\" DevicePath \"\"" Nov 22 10:57:45 crc kubenswrapper[4926]: I1122 10:57:45.724282 4926 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/63e63df0-e7ff-46a2-9b1d-60be115851ce-etc-machine-id\") on node \"crc\" DevicePath \"\"" Nov 22 10:57:45 crc kubenswrapper[4926]: I1122 10:57:45.724295 4926 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/63e63df0-e7ff-46a2-9b1d-60be115851ce-db-sync-config-data\") on node \"crc\" DevicePath \"\"" Nov 22 10:57:45 crc kubenswrapper[4926]: I1122 10:57:45.724307 4926 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/63e63df0-e7ff-46a2-9b1d-60be115851ce-scripts\") on node \"crc\" DevicePath \"\"" Nov 22 10:57:45 crc kubenswrapper[4926]: I1122 10:57:45.724321 4926 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/63e63df0-e7ff-46a2-9b1d-60be115851ce-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 22 10:57:45 crc kubenswrapper[4926]: I1122 10:57:45.724332 4926 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/63e63df0-e7ff-46a2-9b1d-60be115851ce-config-data\") on node \"crc\" DevicePath \"\"" Nov 22 10:57:45 crc kubenswrapper[4926]: W1122 10:57:45.803082 4926 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf3d2dfb5_d3bc_4adc_92c8_24104edfa164.slice/crio-53813f9ae4851450cc7d8f77a5d46f70b0ac34e68d1c984b565a4ed8d6d4e379 WatchSource:0}: Error finding container 53813f9ae4851450cc7d8f77a5d46f70b0ac34e68d1c984b565a4ed8d6d4e379: Status 404 returned error can't find the container with id 53813f9ae4851450cc7d8f77a5d46f70b0ac34e68d1c984b565a4ed8d6d4e379 Nov 22 10:57:45 crc kubenswrapper[4926]: I1122 10:57:45.806300 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-688c87cc99-l2vl8"] Nov 22 10:57:46 crc kubenswrapper[4926]: I1122 10:57:46.005702 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-867d4cfd66-ftqtw"] Nov 22 10:57:46 crc kubenswrapper[4926]: W1122 10:57:46.014375 4926 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb85c12f2_c589_476e_94b3_cc7650c154eb.slice/crio-5bd833ed1e1e403f85abc3fc3e3f6ed58aa5288135bfdfc5f0a129c53111f3ad WatchSource:0}: Error finding container 5bd833ed1e1e403f85abc3fc3e3f6ed58aa5288135bfdfc5f0a129c53111f3ad: Status 404 returned error can't find the container with id 5bd833ed1e1e403f85abc3fc3e3f6ed58aa5288135bfdfc5f0a129c53111f3ad Nov 22 10:57:46 crc kubenswrapper[4926]: I1122 10:57:46.150312 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-5b6fb59ff8-cgr6h" event={"ID":"5659fc64-a862-4d05-989e-4e667a4bb792","Type":"ContainerStarted","Data":"c173add5b547d4acf3585ec2f1257726bcb17fe3231642937d4fa9c19d911881"} Nov 22 10:57:46 crc kubenswrapper[4926]: I1122 10:57:46.153420 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/dnsmasq-dns-688c87cc99-l2vl8" event={"ID":"f3d2dfb5-d3bc-4adc-92c8-24104edfa164","Type":"ContainerStarted","Data":"ad8606703e8e0ecede3bbc472529032921afc3239917eaa2622f99a553d39e44"} Nov 22 10:57:46 crc kubenswrapper[4926]: I1122 10:57:46.153465 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-688c87cc99-l2vl8" event={"ID":"f3d2dfb5-d3bc-4adc-92c8-24104edfa164","Type":"ContainerStarted","Data":"53813f9ae4851450cc7d8f77a5d46f70b0ac34e68d1c984b565a4ed8d6d4e379"} Nov 22 10:57:46 crc kubenswrapper[4926]: I1122 10:57:46.154620 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-6c6596fd55-5fshh" event={"ID":"58c72eaf-f8f2-4333-8057-a9237457d73c","Type":"ContainerStarted","Data":"2d1818bde979fee2f52391662d1cecf745430f9def0b906d03e0108f5ddd1893"} Nov 22 10:57:46 crc kubenswrapper[4926]: I1122 10:57:46.158927 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"8d021220-ab5c-4d9c-9b7a-dd2248121353","Type":"ContainerStarted","Data":"a4302a4e6816e40c9a574135c82c609a1db7db9aa1e6698b6215ce43dfb3893d"} Nov 22 10:57:46 crc kubenswrapper[4926]: I1122 10:57:46.161075 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-sync-fh2gk" Nov 22 10:57:46 crc kubenswrapper[4926]: I1122 10:57:46.161128 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-fh2gk" event={"ID":"63e63df0-e7ff-46a2-9b1d-60be115851ce","Type":"ContainerDied","Data":"10d395895f40e7c6c34eeda0399a95fcfaa37add2d74670f7f3cd5c73fc96a17"} Nov 22 10:57:46 crc kubenswrapper[4926]: I1122 10:57:46.161157 4926 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="10d395895f40e7c6c34eeda0399a95fcfaa37add2d74670f7f3cd5c73fc96a17" Nov 22 10:57:46 crc kubenswrapper[4926]: I1122 10:57:46.177606 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-867d4cfd66-ftqtw" event={"ID":"b85c12f2-c589-476e-94b3-cc7650c154eb","Type":"ContainerStarted","Data":"5bd833ed1e1e403f85abc3fc3e3f6ed58aa5288135bfdfc5f0a129c53111f3ad"} Nov 22 10:57:46 crc kubenswrapper[4926]: I1122 10:57:46.348495 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-scheduler-0"] Nov 22 10:57:46 crc kubenswrapper[4926]: E1122 10:57:46.349854 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="63e63df0-e7ff-46a2-9b1d-60be115851ce" containerName="cinder-db-sync" Nov 22 10:57:46 crc kubenswrapper[4926]: I1122 10:57:46.352902 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="63e63df0-e7ff-46a2-9b1d-60be115851ce" containerName="cinder-db-sync" Nov 22 10:57:46 crc kubenswrapper[4926]: I1122 10:57:46.353367 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="63e63df0-e7ff-46a2-9b1d-60be115851ce" containerName="cinder-db-sync" Nov 22 10:57:46 crc kubenswrapper[4926]: I1122 10:57:46.354784 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-scheduler-0" Nov 22 10:57:46 crc kubenswrapper[4926]: I1122 10:57:46.360606 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-cinder-dockercfg-cq97l" Nov 22 10:57:46 crc kubenswrapper[4926]: I1122 10:57:46.361248 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scripts" Nov 22 10:57:46 crc kubenswrapper[4926]: I1122 10:57:46.362061 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-config-data" Nov 22 10:57:46 crc kubenswrapper[4926]: I1122 10:57:46.364672 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scheduler-config-data" Nov 22 10:57:46 crc kubenswrapper[4926]: I1122 10:57:46.383149 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 22 10:57:46 crc kubenswrapper[4926]: I1122 10:57:46.398366 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-688c87cc99-l2vl8"] Nov 22 10:57:46 crc kubenswrapper[4926]: I1122 10:57:46.450398 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-6bb4fc677f-h86xn"] Nov 22 10:57:46 crc kubenswrapper[4926]: I1122 10:57:46.452061 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6bb4fc677f-h86xn" Nov 22 10:57:46 crc kubenswrapper[4926]: I1122 10:57:46.461867 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6bb4fc677f-h86xn"] Nov 22 10:57:46 crc kubenswrapper[4926]: I1122 10:57:46.537692 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-api-0"] Nov 22 10:57:46 crc kubenswrapper[4926]: I1122 10:57:46.539135 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/684951ad-f4cf-4fb2-83d6-5ce19fea1719-scripts\") pod \"cinder-scheduler-0\" (UID: \"684951ad-f4cf-4fb2-83d6-5ce19fea1719\") " pod="openstack/cinder-scheduler-0" Nov 22 10:57:46 crc kubenswrapper[4926]: I1122 10:57:46.539198 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/684951ad-f4cf-4fb2-83d6-5ce19fea1719-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"684951ad-f4cf-4fb2-83d6-5ce19fea1719\") " pod="openstack/cinder-scheduler-0" Nov 22 10:57:46 crc kubenswrapper[4926]: I1122 10:57:46.539225 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/684951ad-f4cf-4fb2-83d6-5ce19fea1719-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"684951ad-f4cf-4fb2-83d6-5ce19fea1719\") " pod="openstack/cinder-scheduler-0" Nov 22 10:57:46 crc kubenswrapper[4926]: I1122 10:57:46.539253 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/684951ad-f4cf-4fb2-83d6-5ce19fea1719-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"684951ad-f4cf-4fb2-83d6-5ce19fea1719\") " pod="openstack/cinder-scheduler-0" Nov 22 10:57:46 crc kubenswrapper[4926]: I1122 10:57:46.539271 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qd245\" (UniqueName: \"kubernetes.io/projected/684951ad-f4cf-4fb2-83d6-5ce19fea1719-kube-api-access-qd245\") pod 
\"cinder-scheduler-0\" (UID: \"684951ad-f4cf-4fb2-83d6-5ce19fea1719\") " pod="openstack/cinder-scheduler-0" Nov 22 10:57:46 crc kubenswrapper[4926]: I1122 10:57:46.539295 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/684951ad-f4cf-4fb2-83d6-5ce19fea1719-config-data\") pod \"cinder-scheduler-0\" (UID: \"684951ad-f4cf-4fb2-83d6-5ce19fea1719\") " pod="openstack/cinder-scheduler-0" Nov 22 10:57:46 crc kubenswrapper[4926]: I1122 10:57:46.539370 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Nov 22 10:57:46 crc kubenswrapper[4926]: I1122 10:57:46.543130 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-api-config-data" Nov 22 10:57:46 crc kubenswrapper[4926]: I1122 10:57:46.545377 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Nov 22 10:57:46 crc kubenswrapper[4926]: E1122 10:57:46.548161 4926 log.go:32] "CreateContainer in sandbox from runtime service failed" err=< Nov 22 10:57:46 crc kubenswrapper[4926]: rpc error: code = Unknown desc = container create failed: mount `/var/lib/kubelet/pods/f3d2dfb5-d3bc-4adc-92c8-24104edfa164/volume-subpaths/dns-svc/dnsmasq-dns/1` to `etc/dnsmasq.d/hosts/dns-svc`: No such file or directory Nov 22 10:57:46 crc kubenswrapper[4926]: > podSandboxID="53813f9ae4851450cc7d8f77a5d46f70b0ac34e68d1c984b565a4ed8d6d4e379" Nov 22 10:57:46 crc kubenswrapper[4926]: E1122 10:57:46.548321 4926 kuberuntime_manager.go:1274] "Unhandled Error" err=< Nov 22 10:57:46 crc kubenswrapper[4926]: container &Container{Name:dnsmasq-dns,Image:quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv 
--log-queries],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n567h69h57fh656h5dh545h7h595h5bhb9h5cbh548h575h68ch689h5cdhd6h5c6hd6h5c6h546h9h657h688h56h5cch9bh585h56fh64ch587h647q,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:dns-svc,ReadOnly:true,MountPath:/etc/dnsmasq.d/hosts/dns-svc,SubPath:dns-svc,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:dns-swift-storage-0,ReadOnly:true,MountPath:/etc/dnsmasq.d/hosts/dns-swift-storage-0,SubPath:dns-swift-storage-0,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:ovsdbserver-nb,ReadOnly:true,MountPath:/etc/dnsmasq.d/hosts/ovsdbserver-nb,SubPath:ovsdbserver-nb,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:ovsdbserver-sb,ReadOnly:true,MountPath:/etc/dnsmasq.d/hosts/ovsdbserver-sb,SubPath:ovsdbserver-sb,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-tkq95,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:nil,TCPSocket:&TCPSocketAction{Port:{0 5353 },Host:,},GRPC:nil,},InitialDelaySeconds:3,TimeoutSeconds:5,PeriodSeconds:3,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:nil,TCPSocket:&TCPSocketAction{Port:{0 5353 },Host:,},GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:5,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-688c87cc99-l2vl8_openstack(f3d2dfb5-d3bc-4adc-92c8-24104edfa164): CreateContainerError: container create failed: mount `/var/lib/kubelet/pods/f3d2dfb5-d3bc-4adc-92c8-24104edfa164/volume-subpaths/dns-svc/dnsmasq-dns/1` to `etc/dnsmasq.d/hosts/dns-svc`: No such file or directory Nov 22 10:57:46 crc kubenswrapper[4926]: > logger="UnhandledError" Nov 22 10:57:46 crc kubenswrapper[4926]: E1122 10:57:46.549780 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"dnsmasq-dns\" with CreateContainerError: \"container create failed: mount `/var/lib/kubelet/pods/f3d2dfb5-d3bc-4adc-92c8-24104edfa164/volume-subpaths/dns-svc/dnsmasq-dns/1` to `etc/dnsmasq.d/hosts/dns-svc`: No such file or directory\\n\"" pod="openstack/dnsmasq-dns-688c87cc99-l2vl8" 
podUID="f3d2dfb5-d3bc-4adc-92c8-24104edfa164" Nov 22 10:57:46 crc kubenswrapper[4926]: I1122 10:57:46.640546 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/03538cf9-7502-4102-96b0-4a071e0e2292-config-data\") pod \"cinder-api-0\" (UID: \"03538cf9-7502-4102-96b0-4a071e0e2292\") " pod="openstack/cinder-api-0" Nov 22 10:57:46 crc kubenswrapper[4926]: I1122 10:57:46.640655 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/0667807b-dd56-4e0b-b2b7-9936b24ad975-ovsdbserver-nb\") pod \"dnsmasq-dns-6bb4fc677f-h86xn\" (UID: \"0667807b-dd56-4e0b-b2b7-9936b24ad975\") " pod="openstack/dnsmasq-dns-6bb4fc677f-h86xn" Nov 22 10:57:46 crc kubenswrapper[4926]: I1122 10:57:46.640697 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/03538cf9-7502-4102-96b0-4a071e0e2292-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"03538cf9-7502-4102-96b0-4a071e0e2292\") " pod="openstack/cinder-api-0" Nov 22 10:57:46 crc kubenswrapper[4926]: I1122 10:57:46.640721 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/0667807b-dd56-4e0b-b2b7-9936b24ad975-ovsdbserver-sb\") pod \"dnsmasq-dns-6bb4fc677f-h86xn\" (UID: \"0667807b-dd56-4e0b-b2b7-9936b24ad975\") " pod="openstack/dnsmasq-dns-6bb4fc677f-h86xn" Nov 22 10:57:46 crc kubenswrapper[4926]: I1122 10:57:46.640780 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/03538cf9-7502-4102-96b0-4a071e0e2292-config-data-custom\") pod \"cinder-api-0\" (UID: \"03538cf9-7502-4102-96b0-4a071e0e2292\") " pod="openstack/cinder-api-0" Nov 22 10:57:46 crc kubenswrapper[4926]: I1122 10:57:46.640825 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/684951ad-f4cf-4fb2-83d6-5ce19fea1719-scripts\") pod \"cinder-scheduler-0\" (UID: \"684951ad-f4cf-4fb2-83d6-5ce19fea1719\") " pod="openstack/cinder-scheduler-0" Nov 22 10:57:46 crc kubenswrapper[4926]: I1122 10:57:46.640973 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/0667807b-dd56-4e0b-b2b7-9936b24ad975-dns-svc\") pod \"dnsmasq-dns-6bb4fc677f-h86xn\" (UID: \"0667807b-dd56-4e0b-b2b7-9936b24ad975\") " pod="openstack/dnsmasq-dns-6bb4fc677f-h86xn" Nov 22 10:57:46 crc kubenswrapper[4926]: I1122 10:57:46.641057 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/684951ad-f4cf-4fb2-83d6-5ce19fea1719-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"684951ad-f4cf-4fb2-83d6-5ce19fea1719\") " pod="openstack/cinder-scheduler-0" Nov 22 10:57:46 crc kubenswrapper[4926]: I1122 10:57:46.641085 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0667807b-dd56-4e0b-b2b7-9936b24ad975-config\") pod \"dnsmasq-dns-6bb4fc677f-h86xn\" (UID: \"0667807b-dd56-4e0b-b2b7-9936b24ad975\") " pod="openstack/dnsmasq-dns-6bb4fc677f-h86xn" Nov 22 10:57:46 crc kubenswrapper[4926]: I1122 
10:57:46.641153 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/03538cf9-7502-4102-96b0-4a071e0e2292-logs\") pod \"cinder-api-0\" (UID: \"03538cf9-7502-4102-96b0-4a071e0e2292\") " pod="openstack/cinder-api-0" Nov 22 10:57:46 crc kubenswrapper[4926]: I1122 10:57:46.641200 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/684951ad-f4cf-4fb2-83d6-5ce19fea1719-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"684951ad-f4cf-4fb2-83d6-5ce19fea1719\") " pod="openstack/cinder-scheduler-0" Nov 22 10:57:46 crc kubenswrapper[4926]: I1122 10:57:46.641222 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/03538cf9-7502-4102-96b0-4a071e0e2292-etc-machine-id\") pod \"cinder-api-0\" (UID: \"03538cf9-7502-4102-96b0-4a071e0e2292\") " pod="openstack/cinder-api-0" Nov 22 10:57:46 crc kubenswrapper[4926]: I1122 10:57:46.641252 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tqknr\" (UniqueName: \"kubernetes.io/projected/03538cf9-7502-4102-96b0-4a071e0e2292-kube-api-access-tqknr\") pod \"cinder-api-0\" (UID: \"03538cf9-7502-4102-96b0-4a071e0e2292\") " pod="openstack/cinder-api-0" Nov 22 10:57:46 crc kubenswrapper[4926]: I1122 10:57:46.641298 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/0667807b-dd56-4e0b-b2b7-9936b24ad975-dns-swift-storage-0\") pod \"dnsmasq-dns-6bb4fc677f-h86xn\" (UID: \"0667807b-dd56-4e0b-b2b7-9936b24ad975\") " pod="openstack/dnsmasq-dns-6bb4fc677f-h86xn" Nov 22 10:57:46 crc kubenswrapper[4926]: I1122 10:57:46.641372 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/684951ad-f4cf-4fb2-83d6-5ce19fea1719-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"684951ad-f4cf-4fb2-83d6-5ce19fea1719\") " pod="openstack/cinder-scheduler-0" Nov 22 10:57:46 crc kubenswrapper[4926]: I1122 10:57:46.641418 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qd245\" (UniqueName: \"kubernetes.io/projected/684951ad-f4cf-4fb2-83d6-5ce19fea1719-kube-api-access-qd245\") pod \"cinder-scheduler-0\" (UID: \"684951ad-f4cf-4fb2-83d6-5ce19fea1719\") " pod="openstack/cinder-scheduler-0" Nov 22 10:57:46 crc kubenswrapper[4926]: I1122 10:57:46.641447 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/03538cf9-7502-4102-96b0-4a071e0e2292-scripts\") pod \"cinder-api-0\" (UID: \"03538cf9-7502-4102-96b0-4a071e0e2292\") " pod="openstack/cinder-api-0" Nov 22 10:57:46 crc kubenswrapper[4926]: I1122 10:57:46.641471 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/684951ad-f4cf-4fb2-83d6-5ce19fea1719-config-data\") pod \"cinder-scheduler-0\" (UID: \"684951ad-f4cf-4fb2-83d6-5ce19fea1719\") " pod="openstack/cinder-scheduler-0" Nov 22 10:57:46 crc kubenswrapper[4926]: I1122 10:57:46.641493 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z6plr\" (UniqueName: 
\"kubernetes.io/projected/0667807b-dd56-4e0b-b2b7-9936b24ad975-kube-api-access-z6plr\") pod \"dnsmasq-dns-6bb4fc677f-h86xn\" (UID: \"0667807b-dd56-4e0b-b2b7-9936b24ad975\") " pod="openstack/dnsmasq-dns-6bb4fc677f-h86xn" Nov 22 10:57:46 crc kubenswrapper[4926]: I1122 10:57:46.641586 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/684951ad-f4cf-4fb2-83d6-5ce19fea1719-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"684951ad-f4cf-4fb2-83d6-5ce19fea1719\") " pod="openstack/cinder-scheduler-0" Nov 22 10:57:46 crc kubenswrapper[4926]: I1122 10:57:46.644760 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/684951ad-f4cf-4fb2-83d6-5ce19fea1719-scripts\") pod \"cinder-scheduler-0\" (UID: \"684951ad-f4cf-4fb2-83d6-5ce19fea1719\") " pod="openstack/cinder-scheduler-0" Nov 22 10:57:46 crc kubenswrapper[4926]: I1122 10:57:46.647338 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/684951ad-f4cf-4fb2-83d6-5ce19fea1719-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"684951ad-f4cf-4fb2-83d6-5ce19fea1719\") " pod="openstack/cinder-scheduler-0" Nov 22 10:57:46 crc kubenswrapper[4926]: I1122 10:57:46.647949 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/684951ad-f4cf-4fb2-83d6-5ce19fea1719-config-data\") pod \"cinder-scheduler-0\" (UID: \"684951ad-f4cf-4fb2-83d6-5ce19fea1719\") " pod="openstack/cinder-scheduler-0" Nov 22 10:57:46 crc kubenswrapper[4926]: I1122 10:57:46.650075 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/684951ad-f4cf-4fb2-83d6-5ce19fea1719-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"684951ad-f4cf-4fb2-83d6-5ce19fea1719\") " pod="openstack/cinder-scheduler-0" Nov 22 10:57:46 crc kubenswrapper[4926]: I1122 10:57:46.659644 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qd245\" (UniqueName: \"kubernetes.io/projected/684951ad-f4cf-4fb2-83d6-5ce19fea1719-kube-api-access-qd245\") pod \"cinder-scheduler-0\" (UID: \"684951ad-f4cf-4fb2-83d6-5ce19fea1719\") " pod="openstack/cinder-scheduler-0" Nov 22 10:57:46 crc kubenswrapper[4926]: I1122 10:57:46.697109 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-scheduler-0" Nov 22 10:57:46 crc kubenswrapper[4926]: I1122 10:57:46.743011 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/0667807b-dd56-4e0b-b2b7-9936b24ad975-ovsdbserver-sb\") pod \"dnsmasq-dns-6bb4fc677f-h86xn\" (UID: \"0667807b-dd56-4e0b-b2b7-9936b24ad975\") " pod="openstack/dnsmasq-dns-6bb4fc677f-h86xn" Nov 22 10:57:46 crc kubenswrapper[4926]: I1122 10:57:46.743238 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/03538cf9-7502-4102-96b0-4a071e0e2292-config-data-custom\") pod \"cinder-api-0\" (UID: \"03538cf9-7502-4102-96b0-4a071e0e2292\") " pod="openstack/cinder-api-0" Nov 22 10:57:46 crc kubenswrapper[4926]: I1122 10:57:46.743854 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/0667807b-dd56-4e0b-b2b7-9936b24ad975-dns-svc\") pod \"dnsmasq-dns-6bb4fc677f-h86xn\" (UID: \"0667807b-dd56-4e0b-b2b7-9936b24ad975\") " pod="openstack/dnsmasq-dns-6bb4fc677f-h86xn" Nov 22 10:57:46 crc kubenswrapper[4926]: I1122 10:57:46.743974 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0667807b-dd56-4e0b-b2b7-9936b24ad975-config\") pod \"dnsmasq-dns-6bb4fc677f-h86xn\" (UID: \"0667807b-dd56-4e0b-b2b7-9936b24ad975\") " pod="openstack/dnsmasq-dns-6bb4fc677f-h86xn" Nov 22 10:57:46 crc kubenswrapper[4926]: I1122 10:57:46.744048 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/0667807b-dd56-4e0b-b2b7-9936b24ad975-ovsdbserver-sb\") pod \"dnsmasq-dns-6bb4fc677f-h86xn\" (UID: \"0667807b-dd56-4e0b-b2b7-9936b24ad975\") " pod="openstack/dnsmasq-dns-6bb4fc677f-h86xn" Nov 22 10:57:46 crc kubenswrapper[4926]: I1122 10:57:46.744295 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/03538cf9-7502-4102-96b0-4a071e0e2292-logs\") pod \"cinder-api-0\" (UID: \"03538cf9-7502-4102-96b0-4a071e0e2292\") " pod="openstack/cinder-api-0" Nov 22 10:57:46 crc kubenswrapper[4926]: I1122 10:57:46.744331 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/03538cf9-7502-4102-96b0-4a071e0e2292-etc-machine-id\") pod \"cinder-api-0\" (UID: \"03538cf9-7502-4102-96b0-4a071e0e2292\") " pod="openstack/cinder-api-0" Nov 22 10:57:46 crc kubenswrapper[4926]: I1122 10:57:46.744356 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tqknr\" (UniqueName: \"kubernetes.io/projected/03538cf9-7502-4102-96b0-4a071e0e2292-kube-api-access-tqknr\") pod \"cinder-api-0\" (UID: \"03538cf9-7502-4102-96b0-4a071e0e2292\") " pod="openstack/cinder-api-0" Nov 22 10:57:46 crc kubenswrapper[4926]: I1122 10:57:46.744404 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/0667807b-dd56-4e0b-b2b7-9936b24ad975-dns-swift-storage-0\") pod \"dnsmasq-dns-6bb4fc677f-h86xn\" (UID: \"0667807b-dd56-4e0b-b2b7-9936b24ad975\") " pod="openstack/dnsmasq-dns-6bb4fc677f-h86xn" Nov 22 10:57:46 crc kubenswrapper[4926]: I1122 10:57:46.744428 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"scripts\" (UniqueName: \"kubernetes.io/secret/03538cf9-7502-4102-96b0-4a071e0e2292-scripts\") pod \"cinder-api-0\" (UID: \"03538cf9-7502-4102-96b0-4a071e0e2292\") " pod="openstack/cinder-api-0" Nov 22 10:57:46 crc kubenswrapper[4926]: I1122 10:57:46.744455 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-z6plr\" (UniqueName: \"kubernetes.io/projected/0667807b-dd56-4e0b-b2b7-9936b24ad975-kube-api-access-z6plr\") pod \"dnsmasq-dns-6bb4fc677f-h86xn\" (UID: \"0667807b-dd56-4e0b-b2b7-9936b24ad975\") " pod="openstack/dnsmasq-dns-6bb4fc677f-h86xn" Nov 22 10:57:46 crc kubenswrapper[4926]: I1122 10:57:46.744485 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/03538cf9-7502-4102-96b0-4a071e0e2292-config-data\") pod \"cinder-api-0\" (UID: \"03538cf9-7502-4102-96b0-4a071e0e2292\") " pod="openstack/cinder-api-0" Nov 22 10:57:46 crc kubenswrapper[4926]: I1122 10:57:46.744560 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/0667807b-dd56-4e0b-b2b7-9936b24ad975-ovsdbserver-nb\") pod \"dnsmasq-dns-6bb4fc677f-h86xn\" (UID: \"0667807b-dd56-4e0b-b2b7-9936b24ad975\") " pod="openstack/dnsmasq-dns-6bb4fc677f-h86xn" Nov 22 10:57:46 crc kubenswrapper[4926]: I1122 10:57:46.744630 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/03538cf9-7502-4102-96b0-4a071e0e2292-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"03538cf9-7502-4102-96b0-4a071e0e2292\") " pod="openstack/cinder-api-0" Nov 22 10:57:46 crc kubenswrapper[4926]: I1122 10:57:46.744681 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0667807b-dd56-4e0b-b2b7-9936b24ad975-config\") pod \"dnsmasq-dns-6bb4fc677f-h86xn\" (UID: \"0667807b-dd56-4e0b-b2b7-9936b24ad975\") " pod="openstack/dnsmasq-dns-6bb4fc677f-h86xn" Nov 22 10:57:46 crc kubenswrapper[4926]: I1122 10:57:46.745214 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/03538cf9-7502-4102-96b0-4a071e0e2292-etc-machine-id\") pod \"cinder-api-0\" (UID: \"03538cf9-7502-4102-96b0-4a071e0e2292\") " pod="openstack/cinder-api-0" Nov 22 10:57:46 crc kubenswrapper[4926]: I1122 10:57:46.745248 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/03538cf9-7502-4102-96b0-4a071e0e2292-logs\") pod \"cinder-api-0\" (UID: \"03538cf9-7502-4102-96b0-4a071e0e2292\") " pod="openstack/cinder-api-0" Nov 22 10:57:46 crc kubenswrapper[4926]: I1122 10:57:46.746112 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/0667807b-dd56-4e0b-b2b7-9936b24ad975-dns-svc\") pod \"dnsmasq-dns-6bb4fc677f-h86xn\" (UID: \"0667807b-dd56-4e0b-b2b7-9936b24ad975\") " pod="openstack/dnsmasq-dns-6bb4fc677f-h86xn" Nov 22 10:57:46 crc kubenswrapper[4926]: I1122 10:57:46.746417 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/0667807b-dd56-4e0b-b2b7-9936b24ad975-ovsdbserver-nb\") pod \"dnsmasq-dns-6bb4fc677f-h86xn\" (UID: \"0667807b-dd56-4e0b-b2b7-9936b24ad975\") " pod="openstack/dnsmasq-dns-6bb4fc677f-h86xn" Nov 22 10:57:46 crc kubenswrapper[4926]: I1122 10:57:46.746430 
4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/0667807b-dd56-4e0b-b2b7-9936b24ad975-dns-swift-storage-0\") pod \"dnsmasq-dns-6bb4fc677f-h86xn\" (UID: \"0667807b-dd56-4e0b-b2b7-9936b24ad975\") " pod="openstack/dnsmasq-dns-6bb4fc677f-h86xn" Nov 22 10:57:46 crc kubenswrapper[4926]: I1122 10:57:46.748626 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/03538cf9-7502-4102-96b0-4a071e0e2292-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"03538cf9-7502-4102-96b0-4a071e0e2292\") " pod="openstack/cinder-api-0" Nov 22 10:57:46 crc kubenswrapper[4926]: I1122 10:57:46.749868 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/03538cf9-7502-4102-96b0-4a071e0e2292-config-data-custom\") pod \"cinder-api-0\" (UID: \"03538cf9-7502-4102-96b0-4a071e0e2292\") " pod="openstack/cinder-api-0" Nov 22 10:57:46 crc kubenswrapper[4926]: I1122 10:57:46.756632 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/03538cf9-7502-4102-96b0-4a071e0e2292-scripts\") pod \"cinder-api-0\" (UID: \"03538cf9-7502-4102-96b0-4a071e0e2292\") " pod="openstack/cinder-api-0" Nov 22 10:57:46 crc kubenswrapper[4926]: I1122 10:57:46.760737 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/03538cf9-7502-4102-96b0-4a071e0e2292-config-data\") pod \"cinder-api-0\" (UID: \"03538cf9-7502-4102-96b0-4a071e0e2292\") " pod="openstack/cinder-api-0" Nov 22 10:57:46 crc kubenswrapper[4926]: I1122 10:57:46.763104 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-z6plr\" (UniqueName: \"kubernetes.io/projected/0667807b-dd56-4e0b-b2b7-9936b24ad975-kube-api-access-z6plr\") pod \"dnsmasq-dns-6bb4fc677f-h86xn\" (UID: \"0667807b-dd56-4e0b-b2b7-9936b24ad975\") " pod="openstack/dnsmasq-dns-6bb4fc677f-h86xn" Nov 22 10:57:46 crc kubenswrapper[4926]: I1122 10:57:46.764287 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tqknr\" (UniqueName: \"kubernetes.io/projected/03538cf9-7502-4102-96b0-4a071e0e2292-kube-api-access-tqknr\") pod \"cinder-api-0\" (UID: \"03538cf9-7502-4102-96b0-4a071e0e2292\") " pod="openstack/cinder-api-0" Nov 22 10:57:46 crc kubenswrapper[4926]: I1122 10:57:46.780430 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6bb4fc677f-h86xn" Nov 22 10:57:46 crc kubenswrapper[4926]: I1122 10:57:46.925614 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-api-0" Nov 22 10:57:47 crc kubenswrapper[4926]: I1122 10:57:47.230275 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 22 10:57:47 crc kubenswrapper[4926]: I1122 10:57:47.248192 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-867d4cfd66-ftqtw" event={"ID":"b85c12f2-c589-476e-94b3-cc7650c154eb","Type":"ContainerStarted","Data":"7d58570f8b7f83376ced816694bf451d8f04d6248b463f21a024cc08d83b574e"} Nov 22 10:57:47 crc kubenswrapper[4926]: I1122 10:57:47.248253 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-867d4cfd66-ftqtw" event={"ID":"b85c12f2-c589-476e-94b3-cc7650c154eb","Type":"ContainerStarted","Data":"cf7a06d1847cf43a40fb2690b3fc64105778392bc0f8287587552bfe07883185"} Nov 22 10:57:47 crc kubenswrapper[4926]: I1122 10:57:47.249584 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-867d4cfd66-ftqtw" Nov 22 10:57:47 crc kubenswrapper[4926]: I1122 10:57:47.249616 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-867d4cfd66-ftqtw" Nov 22 10:57:47 crc kubenswrapper[4926]: I1122 10:57:47.255560 4926 generic.go:334] "Generic (PLEG): container finished" podID="f3d2dfb5-d3bc-4adc-92c8-24104edfa164" containerID="ad8606703e8e0ecede3bbc472529032921afc3239917eaa2622f99a553d39e44" exitCode=0 Nov 22 10:57:47 crc kubenswrapper[4926]: I1122 10:57:47.255745 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-688c87cc99-l2vl8" event={"ID":"f3d2dfb5-d3bc-4adc-92c8-24104edfa164","Type":"ContainerDied","Data":"ad8606703e8e0ecede3bbc472529032921afc3239917eaa2622f99a553d39e44"} Nov 22 10:57:47 crc kubenswrapper[4926]: I1122 10:57:47.290939 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"8d021220-ab5c-4d9c-9b7a-dd2248121353","Type":"ContainerStarted","Data":"2bf0f11d91c21ac1c621718d3069598344fe2d89df6009d0fcf1fdc951f587cb"} Nov 22 10:57:47 crc kubenswrapper[4926]: I1122 10:57:47.292734 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-api-867d4cfd66-ftqtw" podStartSLOduration=3.292720304 podStartE2EDuration="3.292720304s" podCreationTimestamp="2025-11-22 10:57:44 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 10:57:47.27825001 +0000 UTC m=+1087.579855307" watchObservedRunningTime="2025-11-22 10:57:47.292720304 +0000 UTC m=+1087.594325591" Nov 22 10:57:47 crc kubenswrapper[4926]: I1122 10:57:47.401540 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6bb4fc677f-h86xn"] Nov 22 10:57:47 crc kubenswrapper[4926]: I1122 10:57:47.538935 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Nov 22 10:57:47 crc kubenswrapper[4926]: W1122 10:57:47.678803 4926 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod03538cf9_7502_4102_96b0_4a071e0e2292.slice/crio-9ba916fd9dbbada27aa36ba595c92a7b34c489907391445a502b7c5329719342 WatchSource:0}: Error finding container 9ba916fd9dbbada27aa36ba595c92a7b34c489907391445a502b7c5329719342: Status 404 returned error can't find the container with id 9ba916fd9dbbada27aa36ba595c92a7b34c489907391445a502b7c5329719342 Nov 22 10:57:47 crc kubenswrapper[4926]: I1122 10:57:47.801021 4926 
util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-688c87cc99-l2vl8" Nov 22 10:57:47 crc kubenswrapper[4926]: I1122 10:57:47.976090 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tkq95\" (UniqueName: \"kubernetes.io/projected/f3d2dfb5-d3bc-4adc-92c8-24104edfa164-kube-api-access-tkq95\") pod \"f3d2dfb5-d3bc-4adc-92c8-24104edfa164\" (UID: \"f3d2dfb5-d3bc-4adc-92c8-24104edfa164\") " Nov 22 10:57:47 crc kubenswrapper[4926]: I1122 10:57:47.976172 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/f3d2dfb5-d3bc-4adc-92c8-24104edfa164-ovsdbserver-sb\") pod \"f3d2dfb5-d3bc-4adc-92c8-24104edfa164\" (UID: \"f3d2dfb5-d3bc-4adc-92c8-24104edfa164\") " Nov 22 10:57:47 crc kubenswrapper[4926]: I1122 10:57:47.976201 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/f3d2dfb5-d3bc-4adc-92c8-24104edfa164-dns-swift-storage-0\") pod \"f3d2dfb5-d3bc-4adc-92c8-24104edfa164\" (UID: \"f3d2dfb5-d3bc-4adc-92c8-24104edfa164\") " Nov 22 10:57:47 crc kubenswrapper[4926]: I1122 10:57:47.976268 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f3d2dfb5-d3bc-4adc-92c8-24104edfa164-config\") pod \"f3d2dfb5-d3bc-4adc-92c8-24104edfa164\" (UID: \"f3d2dfb5-d3bc-4adc-92c8-24104edfa164\") " Nov 22 10:57:47 crc kubenswrapper[4926]: I1122 10:57:47.976326 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/f3d2dfb5-d3bc-4adc-92c8-24104edfa164-ovsdbserver-nb\") pod \"f3d2dfb5-d3bc-4adc-92c8-24104edfa164\" (UID: \"f3d2dfb5-d3bc-4adc-92c8-24104edfa164\") " Nov 22 10:57:47 crc kubenswrapper[4926]: I1122 10:57:47.976371 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/f3d2dfb5-d3bc-4adc-92c8-24104edfa164-dns-svc\") pod \"f3d2dfb5-d3bc-4adc-92c8-24104edfa164\" (UID: \"f3d2dfb5-d3bc-4adc-92c8-24104edfa164\") " Nov 22 10:57:47 crc kubenswrapper[4926]: I1122 10:57:47.984565 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f3d2dfb5-d3bc-4adc-92c8-24104edfa164-kube-api-access-tkq95" (OuterVolumeSpecName: "kube-api-access-tkq95") pod "f3d2dfb5-d3bc-4adc-92c8-24104edfa164" (UID: "f3d2dfb5-d3bc-4adc-92c8-24104edfa164"). InnerVolumeSpecName "kube-api-access-tkq95". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:57:48 crc kubenswrapper[4926]: I1122 10:57:48.024214 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f3d2dfb5-d3bc-4adc-92c8-24104edfa164-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "f3d2dfb5-d3bc-4adc-92c8-24104edfa164" (UID: "f3d2dfb5-d3bc-4adc-92c8-24104edfa164"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:57:48 crc kubenswrapper[4926]: I1122 10:57:48.027032 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f3d2dfb5-d3bc-4adc-92c8-24104edfa164-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "f3d2dfb5-d3bc-4adc-92c8-24104edfa164" (UID: "f3d2dfb5-d3bc-4adc-92c8-24104edfa164"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:57:48 crc kubenswrapper[4926]: I1122 10:57:48.032967 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f3d2dfb5-d3bc-4adc-92c8-24104edfa164-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "f3d2dfb5-d3bc-4adc-92c8-24104edfa164" (UID: "f3d2dfb5-d3bc-4adc-92c8-24104edfa164"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:57:48 crc kubenswrapper[4926]: I1122 10:57:48.032980 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f3d2dfb5-d3bc-4adc-92c8-24104edfa164-config" (OuterVolumeSpecName: "config") pod "f3d2dfb5-d3bc-4adc-92c8-24104edfa164" (UID: "f3d2dfb5-d3bc-4adc-92c8-24104edfa164"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:57:48 crc kubenswrapper[4926]: I1122 10:57:48.047077 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f3d2dfb5-d3bc-4adc-92c8-24104edfa164-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "f3d2dfb5-d3bc-4adc-92c8-24104edfa164" (UID: "f3d2dfb5-d3bc-4adc-92c8-24104edfa164"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:57:48 crc kubenswrapper[4926]: I1122 10:57:48.078447 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tkq95\" (UniqueName: \"kubernetes.io/projected/f3d2dfb5-d3bc-4adc-92c8-24104edfa164-kube-api-access-tkq95\") on node \"crc\" DevicePath \"\"" Nov 22 10:57:48 crc kubenswrapper[4926]: I1122 10:57:48.078481 4926 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/f3d2dfb5-d3bc-4adc-92c8-24104edfa164-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 22 10:57:48 crc kubenswrapper[4926]: I1122 10:57:48.078489 4926 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/f3d2dfb5-d3bc-4adc-92c8-24104edfa164-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Nov 22 10:57:48 crc kubenswrapper[4926]: I1122 10:57:48.078500 4926 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f3d2dfb5-d3bc-4adc-92c8-24104edfa164-config\") on node \"crc\" DevicePath \"\"" Nov 22 10:57:48 crc kubenswrapper[4926]: I1122 10:57:48.078508 4926 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/f3d2dfb5-d3bc-4adc-92c8-24104edfa164-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 22 10:57:48 crc kubenswrapper[4926]: I1122 10:57:48.078516 4926 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/f3d2dfb5-d3bc-4adc-92c8-24104edfa164-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 22 10:57:48 crc kubenswrapper[4926]: I1122 10:57:48.302731 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"03538cf9-7502-4102-96b0-4a071e0e2292","Type":"ContainerStarted","Data":"9ba916fd9dbbada27aa36ba595c92a7b34c489907391445a502b7c5329719342"} Nov 22 10:57:48 crc kubenswrapper[4926]: I1122 10:57:48.303713 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6bb4fc677f-h86xn" 
event={"ID":"0667807b-dd56-4e0b-b2b7-9936b24ad975","Type":"ContainerStarted","Data":"762abeab209ce3bfff8550418c0fd3120feff71ae72a1d38c5f8399ade849207"} Nov 22 10:57:48 crc kubenswrapper[4926]: I1122 10:57:48.304847 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"684951ad-f4cf-4fb2-83d6-5ce19fea1719","Type":"ContainerStarted","Data":"1b90e31e27a74c1618d1309850fa547d8fdc41dc351202d24194183479c7d794"} Nov 22 10:57:48 crc kubenswrapper[4926]: I1122 10:57:48.306772 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-688c87cc99-l2vl8" Nov 22 10:57:48 crc kubenswrapper[4926]: I1122 10:57:48.306988 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-688c87cc99-l2vl8" event={"ID":"f3d2dfb5-d3bc-4adc-92c8-24104edfa164","Type":"ContainerDied","Data":"53813f9ae4851450cc7d8f77a5d46f70b0ac34e68d1c984b565a4ed8d6d4e379"} Nov 22 10:57:48 crc kubenswrapper[4926]: I1122 10:57:48.307018 4926 scope.go:117] "RemoveContainer" containerID="ad8606703e8e0ecede3bbc472529032921afc3239917eaa2622f99a553d39e44" Nov 22 10:57:48 crc kubenswrapper[4926]: I1122 10:57:48.406868 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-688c87cc99-l2vl8"] Nov 22 10:57:48 crc kubenswrapper[4926]: I1122 10:57:48.412361 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-688c87cc99-l2vl8"] Nov 22 10:57:48 crc kubenswrapper[4926]: I1122 10:57:48.601530 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f3d2dfb5-d3bc-4adc-92c8-24104edfa164" path="/var/lib/kubelet/pods/f3d2dfb5-d3bc-4adc-92c8-24104edfa164/volumes" Nov 22 10:57:49 crc kubenswrapper[4926]: I1122 10:57:49.318463 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-6c6596fd55-5fshh" event={"ID":"58c72eaf-f8f2-4333-8057-a9237457d73c","Type":"ContainerStarted","Data":"01f0ef660fb70caa566687a94a05cfa56561fb4e2a2ba2d5a4037e4510b01a07"} Nov 22 10:57:49 crc kubenswrapper[4926]: I1122 10:57:49.319907 4926 generic.go:334] "Generic (PLEG): container finished" podID="0667807b-dd56-4e0b-b2b7-9936b24ad975" containerID="bf687a61528e0d4aacd60b4c8eb5af6fc92914a091c034f33c244f03be19d041" exitCode=0 Nov 22 10:57:49 crc kubenswrapper[4926]: I1122 10:57:49.319959 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6bb4fc677f-h86xn" event={"ID":"0667807b-dd56-4e0b-b2b7-9936b24ad975","Type":"ContainerDied","Data":"bf687a61528e0d4aacd60b4c8eb5af6fc92914a091c034f33c244f03be19d041"} Nov 22 10:57:49 crc kubenswrapper[4926]: I1122 10:57:49.325438 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"8d021220-ab5c-4d9c-9b7a-dd2248121353","Type":"ContainerStarted","Data":"f12c7d0fbc318c22fd3030bae7cb7292e6b7fe85d2883d8289ed56c195aa7aff"} Nov 22 10:57:49 crc kubenswrapper[4926]: I1122 10:57:49.325510 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Nov 22 10:57:49 crc kubenswrapper[4926]: I1122 10:57:49.328113 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-5b6fb59ff8-cgr6h" event={"ID":"5659fc64-a862-4d05-989e-4e667a4bb792","Type":"ContainerStarted","Data":"7795e4874b6f9bc173b684d1ba5c7b728f04361d0d3735048efd6ad6d744d993"} Nov 22 10:57:49 crc kubenswrapper[4926]: I1122 10:57:49.381603 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" 
pod="openstack/ceilometer-0" podStartSLOduration=2.196919994 podStartE2EDuration="6.381583887s" podCreationTimestamp="2025-11-22 10:57:43 +0000 UTC" firstStartedPulling="2025-11-22 10:57:43.927650916 +0000 UTC m=+1084.229256213" lastFinishedPulling="2025-11-22 10:57:48.112314819 +0000 UTC m=+1088.413920106" observedRunningTime="2025-11-22 10:57:49.370799698 +0000 UTC m=+1089.672404985" watchObservedRunningTime="2025-11-22 10:57:49.381583887 +0000 UTC m=+1089.683189174" Nov 22 10:57:49 crc kubenswrapper[4926]: I1122 10:57:49.750855 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-api-0"] Nov 22 10:57:50 crc kubenswrapper[4926]: I1122 10:57:50.336441 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-6c6596fd55-5fshh" event={"ID":"58c72eaf-f8f2-4333-8057-a9237457d73c","Type":"ContainerStarted","Data":"5929632abcf247e6e2f6cc3a67a0e2f7847330451fa491a00347b54cee598f2e"} Nov 22 10:57:50 crc kubenswrapper[4926]: I1122 10:57:50.339823 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"03538cf9-7502-4102-96b0-4a071e0e2292","Type":"ContainerStarted","Data":"3db5b918a88e9af2b7b374f31424dd07c1e4e3b29fc93438ce636a9ce07a52bc"} Nov 22 10:57:51 crc kubenswrapper[4926]: I1122 10:57:51.247309 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-api-d84d6cb4b-w4kcl"] Nov 22 10:57:51 crc kubenswrapper[4926]: E1122 10:57:51.256434 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f3d2dfb5-d3bc-4adc-92c8-24104edfa164" containerName="init" Nov 22 10:57:51 crc kubenswrapper[4926]: I1122 10:57:51.256461 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="f3d2dfb5-d3bc-4adc-92c8-24104edfa164" containerName="init" Nov 22 10:57:51 crc kubenswrapper[4926]: I1122 10:57:51.256678 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="f3d2dfb5-d3bc-4adc-92c8-24104edfa164" containerName="init" Nov 22 10:57:51 crc kubenswrapper[4926]: I1122 10:57:51.257700 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-api-d84d6cb4b-w4kcl" Nov 22 10:57:51 crc kubenswrapper[4926]: I1122 10:57:51.265245 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-barbican-public-svc" Nov 22 10:57:51 crc kubenswrapper[4926]: I1122 10:57:51.267750 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-barbican-internal-svc" Nov 22 10:57:51 crc kubenswrapper[4926]: I1122 10:57:51.273909 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-d84d6cb4b-w4kcl"] Nov 22 10:57:51 crc kubenswrapper[4926]: I1122 10:57:51.387222 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-5b6fb59ff8-cgr6h" event={"ID":"5659fc64-a862-4d05-989e-4e667a4bb792","Type":"ContainerStarted","Data":"537798cbabd7befcd1ecf545536a3c309636ca29f09287775ec32cf7672fe433"} Nov 22 10:57:51 crc kubenswrapper[4926]: I1122 10:57:51.393322 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6bb4fc677f-h86xn" event={"ID":"0667807b-dd56-4e0b-b2b7-9936b24ad975","Type":"ContainerStarted","Data":"156380288bb350c0ba1cffa92a44e6229c6cba775887be81ba0dc3cc886efefa"} Nov 22 10:57:51 crc kubenswrapper[4926]: I1122 10:57:51.393563 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-6bb4fc677f-h86xn" Nov 22 10:57:51 crc kubenswrapper[4926]: I1122 10:57:51.397735 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"684951ad-f4cf-4fb2-83d6-5ce19fea1719","Type":"ContainerStarted","Data":"d644a2b72e6965c29f8677c53eb1fde30d0936ad857c292aeee5c0d754905a3c"} Nov 22 10:57:51 crc kubenswrapper[4926]: I1122 10:57:51.414372 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-keystone-listener-5b6fb59ff8-cgr6h" podStartSLOduration=5.000172845 podStartE2EDuration="7.414354085s" podCreationTimestamp="2025-11-22 10:57:44 +0000 UTC" firstStartedPulling="2025-11-22 10:57:45.680222857 +0000 UTC m=+1085.981828144" lastFinishedPulling="2025-11-22 10:57:48.094404107 +0000 UTC m=+1088.396009384" observedRunningTime="2025-11-22 10:57:51.406518101 +0000 UTC m=+1091.708123388" watchObservedRunningTime="2025-11-22 10:57:51.414354085 +0000 UTC m=+1091.715959372" Nov 22 10:57:51 crc kubenswrapper[4926]: I1122 10:57:51.440224 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-6bb4fc677f-h86xn" podStartSLOduration=5.440206784 podStartE2EDuration="5.440206784s" podCreationTimestamp="2025-11-22 10:57:46 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 10:57:51.435490329 +0000 UTC m=+1091.737095606" watchObservedRunningTime="2025-11-22 10:57:51.440206784 +0000 UTC m=+1091.741812071" Nov 22 10:57:51 crc kubenswrapper[4926]: I1122 10:57:51.461667 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nps27\" (UniqueName: \"kubernetes.io/projected/d0482fa7-3e3e-4c94-8bb4-76e4aa4f1d7b-kube-api-access-nps27\") pod \"barbican-api-d84d6cb4b-w4kcl\" (UID: \"d0482fa7-3e3e-4c94-8bb4-76e4aa4f1d7b\") " pod="openstack/barbican-api-d84d6cb4b-w4kcl" Nov 22 10:57:51 crc kubenswrapper[4926]: I1122 10:57:51.461793 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: 
\"kubernetes.io/secret/d0482fa7-3e3e-4c94-8bb4-76e4aa4f1d7b-config-data\") pod \"barbican-api-d84d6cb4b-w4kcl\" (UID: \"d0482fa7-3e3e-4c94-8bb4-76e4aa4f1d7b\") " pod="openstack/barbican-api-d84d6cb4b-w4kcl" Nov 22 10:57:51 crc kubenswrapper[4926]: I1122 10:57:51.461822 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/d0482fa7-3e3e-4c94-8bb4-76e4aa4f1d7b-internal-tls-certs\") pod \"barbican-api-d84d6cb4b-w4kcl\" (UID: \"d0482fa7-3e3e-4c94-8bb4-76e4aa4f1d7b\") " pod="openstack/barbican-api-d84d6cb4b-w4kcl" Nov 22 10:57:51 crc kubenswrapper[4926]: I1122 10:57:51.461842 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d0482fa7-3e3e-4c94-8bb4-76e4aa4f1d7b-combined-ca-bundle\") pod \"barbican-api-d84d6cb4b-w4kcl\" (UID: \"d0482fa7-3e3e-4c94-8bb4-76e4aa4f1d7b\") " pod="openstack/barbican-api-d84d6cb4b-w4kcl" Nov 22 10:57:51 crc kubenswrapper[4926]: I1122 10:57:51.461868 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/d0482fa7-3e3e-4c94-8bb4-76e4aa4f1d7b-public-tls-certs\") pod \"barbican-api-d84d6cb4b-w4kcl\" (UID: \"d0482fa7-3e3e-4c94-8bb4-76e4aa4f1d7b\") " pod="openstack/barbican-api-d84d6cb4b-w4kcl" Nov 22 10:57:51 crc kubenswrapper[4926]: I1122 10:57:51.461922 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d0482fa7-3e3e-4c94-8bb4-76e4aa4f1d7b-logs\") pod \"barbican-api-d84d6cb4b-w4kcl\" (UID: \"d0482fa7-3e3e-4c94-8bb4-76e4aa4f1d7b\") " pod="openstack/barbican-api-d84d6cb4b-w4kcl" Nov 22 10:57:51 crc kubenswrapper[4926]: I1122 10:57:51.461947 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/d0482fa7-3e3e-4c94-8bb4-76e4aa4f1d7b-config-data-custom\") pod \"barbican-api-d84d6cb4b-w4kcl\" (UID: \"d0482fa7-3e3e-4c94-8bb4-76e4aa4f1d7b\") " pod="openstack/barbican-api-d84d6cb4b-w4kcl" Nov 22 10:57:51 crc kubenswrapper[4926]: I1122 10:57:51.489120 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-worker-6c6596fd55-5fshh" podStartSLOduration=5.058144612 podStartE2EDuration="7.489100352s" podCreationTimestamp="2025-11-22 10:57:44 +0000 UTC" firstStartedPulling="2025-11-22 10:57:45.663853639 +0000 UTC m=+1085.965458926" lastFinishedPulling="2025-11-22 10:57:48.094809379 +0000 UTC m=+1088.396414666" observedRunningTime="2025-11-22 10:57:51.453390861 +0000 UTC m=+1091.754996148" watchObservedRunningTime="2025-11-22 10:57:51.489100352 +0000 UTC m=+1091.790705629" Nov 22 10:57:51 crc kubenswrapper[4926]: I1122 10:57:51.563002 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d0482fa7-3e3e-4c94-8bb4-76e4aa4f1d7b-config-data\") pod \"barbican-api-d84d6cb4b-w4kcl\" (UID: \"d0482fa7-3e3e-4c94-8bb4-76e4aa4f1d7b\") " pod="openstack/barbican-api-d84d6cb4b-w4kcl" Nov 22 10:57:51 crc kubenswrapper[4926]: I1122 10:57:51.563055 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/d0482fa7-3e3e-4c94-8bb4-76e4aa4f1d7b-internal-tls-certs\") pod \"barbican-api-d84d6cb4b-w4kcl\" 
(UID: \"d0482fa7-3e3e-4c94-8bb4-76e4aa4f1d7b\") " pod="openstack/barbican-api-d84d6cb4b-w4kcl" Nov 22 10:57:51 crc kubenswrapper[4926]: I1122 10:57:51.563497 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d0482fa7-3e3e-4c94-8bb4-76e4aa4f1d7b-combined-ca-bundle\") pod \"barbican-api-d84d6cb4b-w4kcl\" (UID: \"d0482fa7-3e3e-4c94-8bb4-76e4aa4f1d7b\") " pod="openstack/barbican-api-d84d6cb4b-w4kcl" Nov 22 10:57:51 crc kubenswrapper[4926]: I1122 10:57:51.563575 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/d0482fa7-3e3e-4c94-8bb4-76e4aa4f1d7b-public-tls-certs\") pod \"barbican-api-d84d6cb4b-w4kcl\" (UID: \"d0482fa7-3e3e-4c94-8bb4-76e4aa4f1d7b\") " pod="openstack/barbican-api-d84d6cb4b-w4kcl" Nov 22 10:57:51 crc kubenswrapper[4926]: I1122 10:57:51.567488 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d0482fa7-3e3e-4c94-8bb4-76e4aa4f1d7b-logs\") pod \"barbican-api-d84d6cb4b-w4kcl\" (UID: \"d0482fa7-3e3e-4c94-8bb4-76e4aa4f1d7b\") " pod="openstack/barbican-api-d84d6cb4b-w4kcl" Nov 22 10:57:51 crc kubenswrapper[4926]: I1122 10:57:51.567522 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/d0482fa7-3e3e-4c94-8bb4-76e4aa4f1d7b-config-data-custom\") pod \"barbican-api-d84d6cb4b-w4kcl\" (UID: \"d0482fa7-3e3e-4c94-8bb4-76e4aa4f1d7b\") " pod="openstack/barbican-api-d84d6cb4b-w4kcl" Nov 22 10:57:51 crc kubenswrapper[4926]: I1122 10:57:51.567650 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nps27\" (UniqueName: \"kubernetes.io/projected/d0482fa7-3e3e-4c94-8bb4-76e4aa4f1d7b-kube-api-access-nps27\") pod \"barbican-api-d84d6cb4b-w4kcl\" (UID: \"d0482fa7-3e3e-4c94-8bb4-76e4aa4f1d7b\") " pod="openstack/barbican-api-d84d6cb4b-w4kcl" Nov 22 10:57:51 crc kubenswrapper[4926]: I1122 10:57:51.568901 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d0482fa7-3e3e-4c94-8bb4-76e4aa4f1d7b-logs\") pod \"barbican-api-d84d6cb4b-w4kcl\" (UID: \"d0482fa7-3e3e-4c94-8bb4-76e4aa4f1d7b\") " pod="openstack/barbican-api-d84d6cb4b-w4kcl" Nov 22 10:57:51 crc kubenswrapper[4926]: I1122 10:57:51.570979 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/d0482fa7-3e3e-4c94-8bb4-76e4aa4f1d7b-public-tls-certs\") pod \"barbican-api-d84d6cb4b-w4kcl\" (UID: \"d0482fa7-3e3e-4c94-8bb4-76e4aa4f1d7b\") " pod="openstack/barbican-api-d84d6cb4b-w4kcl" Nov 22 10:57:51 crc kubenswrapper[4926]: I1122 10:57:51.574653 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d0482fa7-3e3e-4c94-8bb4-76e4aa4f1d7b-combined-ca-bundle\") pod \"barbican-api-d84d6cb4b-w4kcl\" (UID: \"d0482fa7-3e3e-4c94-8bb4-76e4aa4f1d7b\") " pod="openstack/barbican-api-d84d6cb4b-w4kcl" Nov 22 10:57:51 crc kubenswrapper[4926]: I1122 10:57:51.578993 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/d0482fa7-3e3e-4c94-8bb4-76e4aa4f1d7b-config-data-custom\") pod \"barbican-api-d84d6cb4b-w4kcl\" (UID: \"d0482fa7-3e3e-4c94-8bb4-76e4aa4f1d7b\") " 
pod="openstack/barbican-api-d84d6cb4b-w4kcl" Nov 22 10:57:51 crc kubenswrapper[4926]: I1122 10:57:51.586549 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/d0482fa7-3e3e-4c94-8bb4-76e4aa4f1d7b-internal-tls-certs\") pod \"barbican-api-d84d6cb4b-w4kcl\" (UID: \"d0482fa7-3e3e-4c94-8bb4-76e4aa4f1d7b\") " pod="openstack/barbican-api-d84d6cb4b-w4kcl" Nov 22 10:57:51 crc kubenswrapper[4926]: I1122 10:57:51.609443 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/horizon-67f69cf99d-5jsdr" Nov 22 10:57:51 crc kubenswrapper[4926]: I1122 10:57:51.611071 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d0482fa7-3e3e-4c94-8bb4-76e4aa4f1d7b-config-data\") pod \"barbican-api-d84d6cb4b-w4kcl\" (UID: \"d0482fa7-3e3e-4c94-8bb4-76e4aa4f1d7b\") " pod="openstack/barbican-api-d84d6cb4b-w4kcl" Nov 22 10:57:51 crc kubenswrapper[4926]: I1122 10:57:51.630098 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nps27\" (UniqueName: \"kubernetes.io/projected/d0482fa7-3e3e-4c94-8bb4-76e4aa4f1d7b-kube-api-access-nps27\") pod \"barbican-api-d84d6cb4b-w4kcl\" (UID: \"d0482fa7-3e3e-4c94-8bb4-76e4aa4f1d7b\") " pod="openstack/barbican-api-d84d6cb4b-w4kcl" Nov 22 10:57:51 crc kubenswrapper[4926]: I1122 10:57:51.705722 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/horizon-86dd5d599b-jndzq" Nov 22 10:57:51 crc kubenswrapper[4926]: I1122 10:57:51.889062 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-d84d6cb4b-w4kcl" Nov 22 10:57:52 crc kubenswrapper[4926]: I1122 10:57:52.417741 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"03538cf9-7502-4102-96b0-4a071e0e2292","Type":"ContainerStarted","Data":"490c1051c37fd0a49c6f40cd160f04d76c61e658334b565748c727069c819566"} Nov 22 10:57:52 crc kubenswrapper[4926]: I1122 10:57:52.418445 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-api-0" podUID="03538cf9-7502-4102-96b0-4a071e0e2292" containerName="cinder-api-log" containerID="cri-o://3db5b918a88e9af2b7b374f31424dd07c1e4e3b29fc93438ce636a9ce07a52bc" gracePeriod=30 Nov 22 10:57:52 crc kubenswrapper[4926]: I1122 10:57:52.418742 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/cinder-api-0" Nov 22 10:57:52 crc kubenswrapper[4926]: I1122 10:57:52.419043 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-api-0" podUID="03538cf9-7502-4102-96b0-4a071e0e2292" containerName="cinder-api" containerID="cri-o://490c1051c37fd0a49c6f40cd160f04d76c61e658334b565748c727069c819566" gracePeriod=30 Nov 22 10:57:52 crc kubenswrapper[4926]: I1122 10:57:52.429277 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"684951ad-f4cf-4fb2-83d6-5ce19fea1719","Type":"ContainerStarted","Data":"7f95f216ed43bc6facdd0d7c8a829a47795cbf662d9dad96aa2be1e0492ca903"} Nov 22 10:57:52 crc kubenswrapper[4926]: W1122 10:57:52.443017 4926 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd0482fa7_3e3e_4c94_8bb4_76e4aa4f1d7b.slice/crio-50aa3b215e09281d415747cb00a5538c157c141021c7a7a011b448d9ee3c5f32 WatchSource:0}: Error finding container 
50aa3b215e09281d415747cb00a5538c157c141021c7a7a011b448d9ee3c5f32: Status 404 returned error can't find the container with id 50aa3b215e09281d415747cb00a5538c157c141021c7a7a011b448d9ee3c5f32 Nov 22 10:57:52 crc kubenswrapper[4926]: I1122 10:57:52.463250 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-d84d6cb4b-w4kcl"] Nov 22 10:57:52 crc kubenswrapper[4926]: I1122 10:57:52.470818 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-api-0" podStartSLOduration=6.470759549 podStartE2EDuration="6.470759549s" podCreationTimestamp="2025-11-22 10:57:46 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 10:57:52.444857259 +0000 UTC m=+1092.746462546" watchObservedRunningTime="2025-11-22 10:57:52.470759549 +0000 UTC m=+1092.772364846" Nov 22 10:57:52 crc kubenswrapper[4926]: I1122 10:57:52.484428 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-scheduler-0" podStartSLOduration=5.70315297 podStartE2EDuration="6.484405619s" podCreationTimestamp="2025-11-22 10:57:46 +0000 UTC" firstStartedPulling="2025-11-22 10:57:47.673785685 +0000 UTC m=+1087.975390972" lastFinishedPulling="2025-11-22 10:57:48.455038334 +0000 UTC m=+1088.756643621" observedRunningTime="2025-11-22 10:57:52.466698623 +0000 UTC m=+1092.768303910" watchObservedRunningTime="2025-11-22 10:57:52.484405619 +0000 UTC m=+1092.786010906" Nov 22 10:57:53 crc kubenswrapper[4926]: I1122 10:57:53.264339 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Nov 22 10:57:53 crc kubenswrapper[4926]: I1122 10:57:53.423046 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/03538cf9-7502-4102-96b0-4a071e0e2292-combined-ca-bundle\") pod \"03538cf9-7502-4102-96b0-4a071e0e2292\" (UID: \"03538cf9-7502-4102-96b0-4a071e0e2292\") " Nov 22 10:57:53 crc kubenswrapper[4926]: I1122 10:57:53.423098 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tqknr\" (UniqueName: \"kubernetes.io/projected/03538cf9-7502-4102-96b0-4a071e0e2292-kube-api-access-tqknr\") pod \"03538cf9-7502-4102-96b0-4a071e0e2292\" (UID: \"03538cf9-7502-4102-96b0-4a071e0e2292\") " Nov 22 10:57:53 crc kubenswrapper[4926]: I1122 10:57:53.423166 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/03538cf9-7502-4102-96b0-4a071e0e2292-scripts\") pod \"03538cf9-7502-4102-96b0-4a071e0e2292\" (UID: \"03538cf9-7502-4102-96b0-4a071e0e2292\") " Nov 22 10:57:53 crc kubenswrapper[4926]: I1122 10:57:53.423198 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/03538cf9-7502-4102-96b0-4a071e0e2292-config-data-custom\") pod \"03538cf9-7502-4102-96b0-4a071e0e2292\" (UID: \"03538cf9-7502-4102-96b0-4a071e0e2292\") " Nov 22 10:57:53 crc kubenswrapper[4926]: I1122 10:57:53.423260 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/03538cf9-7502-4102-96b0-4a071e0e2292-etc-machine-id\") pod \"03538cf9-7502-4102-96b0-4a071e0e2292\" (UID: \"03538cf9-7502-4102-96b0-4a071e0e2292\") " Nov 22 10:57:53 crc kubenswrapper[4926]: I1122 10:57:53.423289 4926 
reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/03538cf9-7502-4102-96b0-4a071e0e2292-config-data\") pod \"03538cf9-7502-4102-96b0-4a071e0e2292\" (UID: \"03538cf9-7502-4102-96b0-4a071e0e2292\") " Nov 22 10:57:53 crc kubenswrapper[4926]: I1122 10:57:53.423342 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/03538cf9-7502-4102-96b0-4a071e0e2292-logs\") pod \"03538cf9-7502-4102-96b0-4a071e0e2292\" (UID: \"03538cf9-7502-4102-96b0-4a071e0e2292\") " Nov 22 10:57:53 crc kubenswrapper[4926]: I1122 10:57:53.424358 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/03538cf9-7502-4102-96b0-4a071e0e2292-logs" (OuterVolumeSpecName: "logs") pod "03538cf9-7502-4102-96b0-4a071e0e2292" (UID: "03538cf9-7502-4102-96b0-4a071e0e2292"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 10:57:53 crc kubenswrapper[4926]: I1122 10:57:53.424705 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/03538cf9-7502-4102-96b0-4a071e0e2292-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "03538cf9-7502-4102-96b0-4a071e0e2292" (UID: "03538cf9-7502-4102-96b0-4a071e0e2292"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 22 10:57:53 crc kubenswrapper[4926]: I1122 10:57:53.429820 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/03538cf9-7502-4102-96b0-4a071e0e2292-kube-api-access-tqknr" (OuterVolumeSpecName: "kube-api-access-tqknr") pod "03538cf9-7502-4102-96b0-4a071e0e2292" (UID: "03538cf9-7502-4102-96b0-4a071e0e2292"). InnerVolumeSpecName "kube-api-access-tqknr". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:57:53 crc kubenswrapper[4926]: I1122 10:57:53.430296 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/03538cf9-7502-4102-96b0-4a071e0e2292-scripts" (OuterVolumeSpecName: "scripts") pod "03538cf9-7502-4102-96b0-4a071e0e2292" (UID: "03538cf9-7502-4102-96b0-4a071e0e2292"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:57:53 crc kubenswrapper[4926]: I1122 10:57:53.431447 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/03538cf9-7502-4102-96b0-4a071e0e2292-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "03538cf9-7502-4102-96b0-4a071e0e2292" (UID: "03538cf9-7502-4102-96b0-4a071e0e2292"). InnerVolumeSpecName "config-data-custom". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:57:53 crc kubenswrapper[4926]: I1122 10:57:53.445942 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-d84d6cb4b-w4kcl" event={"ID":"d0482fa7-3e3e-4c94-8bb4-76e4aa4f1d7b","Type":"ContainerStarted","Data":"2ce3f89c30ddfc24fa0a6e6d4c87fd5e94b6cc938ccf734013d7420cb66cc8bb"} Nov 22 10:57:53 crc kubenswrapper[4926]: I1122 10:57:53.445990 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-d84d6cb4b-w4kcl" event={"ID":"d0482fa7-3e3e-4c94-8bb4-76e4aa4f1d7b","Type":"ContainerStarted","Data":"f7dd6250f496b76901e51a6a4810ba7e5a11d4e203986656496df11708810b61"} Nov 22 10:57:53 crc kubenswrapper[4926]: I1122 10:57:53.446002 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-d84d6cb4b-w4kcl" event={"ID":"d0482fa7-3e3e-4c94-8bb4-76e4aa4f1d7b","Type":"ContainerStarted","Data":"50aa3b215e09281d415747cb00a5538c157c141021c7a7a011b448d9ee3c5f32"} Nov 22 10:57:53 crc kubenswrapper[4926]: I1122 10:57:53.446330 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-d84d6cb4b-w4kcl" Nov 22 10:57:53 crc kubenswrapper[4926]: I1122 10:57:53.446359 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-d84d6cb4b-w4kcl" Nov 22 10:57:53 crc kubenswrapper[4926]: I1122 10:57:53.448387 4926 generic.go:334] "Generic (PLEG): container finished" podID="03538cf9-7502-4102-96b0-4a071e0e2292" containerID="490c1051c37fd0a49c6f40cd160f04d76c61e658334b565748c727069c819566" exitCode=0 Nov 22 10:57:53 crc kubenswrapper[4926]: I1122 10:57:53.448412 4926 generic.go:334] "Generic (PLEG): container finished" podID="03538cf9-7502-4102-96b0-4a071e0e2292" containerID="3db5b918a88e9af2b7b374f31424dd07c1e4e3b29fc93438ce636a9ce07a52bc" exitCode=143 Nov 22 10:57:53 crc kubenswrapper[4926]: I1122 10:57:53.448568 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"03538cf9-7502-4102-96b0-4a071e0e2292","Type":"ContainerDied","Data":"490c1051c37fd0a49c6f40cd160f04d76c61e658334b565748c727069c819566"} Nov 22 10:57:53 crc kubenswrapper[4926]: I1122 10:57:53.448624 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"03538cf9-7502-4102-96b0-4a071e0e2292","Type":"ContainerDied","Data":"3db5b918a88e9af2b7b374f31424dd07c1e4e3b29fc93438ce636a9ce07a52bc"} Nov 22 10:57:53 crc kubenswrapper[4926]: I1122 10:57:53.448638 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"03538cf9-7502-4102-96b0-4a071e0e2292","Type":"ContainerDied","Data":"9ba916fd9dbbada27aa36ba595c92a7b34c489907391445a502b7c5329719342"} Nov 22 10:57:53 crc kubenswrapper[4926]: I1122 10:57:53.448652 4926 scope.go:117] "RemoveContainer" containerID="490c1051c37fd0a49c6f40cd160f04d76c61e658334b565748c727069c819566" Nov 22 10:57:53 crc kubenswrapper[4926]: I1122 10:57:53.448929 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Nov 22 10:57:53 crc kubenswrapper[4926]: I1122 10:57:53.470683 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/03538cf9-7502-4102-96b0-4a071e0e2292-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "03538cf9-7502-4102-96b0-4a071e0e2292" (UID: "03538cf9-7502-4102-96b0-4a071e0e2292"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:57:53 crc kubenswrapper[4926]: I1122 10:57:53.483299 4926 scope.go:117] "RemoveContainer" containerID="3db5b918a88e9af2b7b374f31424dd07c1e4e3b29fc93438ce636a9ce07a52bc" Nov 22 10:57:53 crc kubenswrapper[4926]: I1122 10:57:53.503210 4926 scope.go:117] "RemoveContainer" containerID="490c1051c37fd0a49c6f40cd160f04d76c61e658334b565748c727069c819566" Nov 22 10:57:53 crc kubenswrapper[4926]: E1122 10:57:53.504951 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"490c1051c37fd0a49c6f40cd160f04d76c61e658334b565748c727069c819566\": container with ID starting with 490c1051c37fd0a49c6f40cd160f04d76c61e658334b565748c727069c819566 not found: ID does not exist" containerID="490c1051c37fd0a49c6f40cd160f04d76c61e658334b565748c727069c819566" Nov 22 10:57:53 crc kubenswrapper[4926]: I1122 10:57:53.504984 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"490c1051c37fd0a49c6f40cd160f04d76c61e658334b565748c727069c819566"} err="failed to get container status \"490c1051c37fd0a49c6f40cd160f04d76c61e658334b565748c727069c819566\": rpc error: code = NotFound desc = could not find container \"490c1051c37fd0a49c6f40cd160f04d76c61e658334b565748c727069c819566\": container with ID starting with 490c1051c37fd0a49c6f40cd160f04d76c61e658334b565748c727069c819566 not found: ID does not exist" Nov 22 10:57:53 crc kubenswrapper[4926]: I1122 10:57:53.505004 4926 scope.go:117] "RemoveContainer" containerID="3db5b918a88e9af2b7b374f31424dd07c1e4e3b29fc93438ce636a9ce07a52bc" Nov 22 10:57:53 crc kubenswrapper[4926]: E1122 10:57:53.505339 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3db5b918a88e9af2b7b374f31424dd07c1e4e3b29fc93438ce636a9ce07a52bc\": container with ID starting with 3db5b918a88e9af2b7b374f31424dd07c1e4e3b29fc93438ce636a9ce07a52bc not found: ID does not exist" containerID="3db5b918a88e9af2b7b374f31424dd07c1e4e3b29fc93438ce636a9ce07a52bc" Nov 22 10:57:53 crc kubenswrapper[4926]: I1122 10:57:53.505418 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3db5b918a88e9af2b7b374f31424dd07c1e4e3b29fc93438ce636a9ce07a52bc"} err="failed to get container status \"3db5b918a88e9af2b7b374f31424dd07c1e4e3b29fc93438ce636a9ce07a52bc\": rpc error: code = NotFound desc = could not find container \"3db5b918a88e9af2b7b374f31424dd07c1e4e3b29fc93438ce636a9ce07a52bc\": container with ID starting with 3db5b918a88e9af2b7b374f31424dd07c1e4e3b29fc93438ce636a9ce07a52bc not found: ID does not exist" Nov 22 10:57:53 crc kubenswrapper[4926]: I1122 10:57:53.505472 4926 scope.go:117] "RemoveContainer" containerID="490c1051c37fd0a49c6f40cd160f04d76c61e658334b565748c727069c819566" Nov 22 10:57:53 crc kubenswrapper[4926]: I1122 10:57:53.505880 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"490c1051c37fd0a49c6f40cd160f04d76c61e658334b565748c727069c819566"} err="failed to get container status \"490c1051c37fd0a49c6f40cd160f04d76c61e658334b565748c727069c819566\": rpc error: code = NotFound desc = could not find container \"490c1051c37fd0a49c6f40cd160f04d76c61e658334b565748c727069c819566\": container with ID starting with 490c1051c37fd0a49c6f40cd160f04d76c61e658334b565748c727069c819566 not found: ID does not exist" Nov 22 10:57:53 crc kubenswrapper[4926]: I1122 10:57:53.505927 4926 
scope.go:117] "RemoveContainer" containerID="3db5b918a88e9af2b7b374f31424dd07c1e4e3b29fc93438ce636a9ce07a52bc" Nov 22 10:57:53 crc kubenswrapper[4926]: I1122 10:57:53.506234 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3db5b918a88e9af2b7b374f31424dd07c1e4e3b29fc93438ce636a9ce07a52bc"} err="failed to get container status \"3db5b918a88e9af2b7b374f31424dd07c1e4e3b29fc93438ce636a9ce07a52bc\": rpc error: code = NotFound desc = could not find container \"3db5b918a88e9af2b7b374f31424dd07c1e4e3b29fc93438ce636a9ce07a52bc\": container with ID starting with 3db5b918a88e9af2b7b374f31424dd07c1e4e3b29fc93438ce636a9ce07a52bc not found: ID does not exist" Nov 22 10:57:53 crc kubenswrapper[4926]: I1122 10:57:53.509154 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/03538cf9-7502-4102-96b0-4a071e0e2292-config-data" (OuterVolumeSpecName: "config-data") pod "03538cf9-7502-4102-96b0-4a071e0e2292" (UID: "03538cf9-7502-4102-96b0-4a071e0e2292"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:57:53 crc kubenswrapper[4926]: I1122 10:57:53.525516 4926 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/03538cf9-7502-4102-96b0-4a071e0e2292-etc-machine-id\") on node \"crc\" DevicePath \"\"" Nov 22 10:57:53 crc kubenswrapper[4926]: I1122 10:57:53.525552 4926 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/03538cf9-7502-4102-96b0-4a071e0e2292-config-data\") on node \"crc\" DevicePath \"\"" Nov 22 10:57:53 crc kubenswrapper[4926]: I1122 10:57:53.525564 4926 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/03538cf9-7502-4102-96b0-4a071e0e2292-logs\") on node \"crc\" DevicePath \"\"" Nov 22 10:57:53 crc kubenswrapper[4926]: I1122 10:57:53.525576 4926 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/03538cf9-7502-4102-96b0-4a071e0e2292-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 22 10:57:53 crc kubenswrapper[4926]: I1122 10:57:53.525587 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tqknr\" (UniqueName: \"kubernetes.io/projected/03538cf9-7502-4102-96b0-4a071e0e2292-kube-api-access-tqknr\") on node \"crc\" DevicePath \"\"" Nov 22 10:57:53 crc kubenswrapper[4926]: I1122 10:57:53.525599 4926 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/03538cf9-7502-4102-96b0-4a071e0e2292-scripts\") on node \"crc\" DevicePath \"\"" Nov 22 10:57:53 crc kubenswrapper[4926]: I1122 10:57:53.525609 4926 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/03538cf9-7502-4102-96b0-4a071e0e2292-config-data-custom\") on node \"crc\" DevicePath \"\"" Nov 22 10:57:53 crc kubenswrapper[4926]: I1122 10:57:53.538412 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/horizon-67f69cf99d-5jsdr" Nov 22 10:57:53 crc kubenswrapper[4926]: I1122 10:57:53.576255 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-api-d84d6cb4b-w4kcl" podStartSLOduration=2.5762307939999998 podStartE2EDuration="2.576230794s" podCreationTimestamp="2025-11-22 10:57:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 
00:00:00 +0000 UTC" observedRunningTime="2025-11-22 10:57:53.470902583 +0000 UTC m=+1093.772507870" watchObservedRunningTime="2025-11-22 10:57:53.576230794 +0000 UTC m=+1093.877836081" Nov 22 10:57:53 crc kubenswrapper[4926]: I1122 10:57:53.782547 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-api-0"] Nov 22 10:57:53 crc kubenswrapper[4926]: I1122 10:57:53.805586 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-api-0"] Nov 22 10:57:53 crc kubenswrapper[4926]: I1122 10:57:53.812757 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-api-0"] Nov 22 10:57:53 crc kubenswrapper[4926]: E1122 10:57:53.813301 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="03538cf9-7502-4102-96b0-4a071e0e2292" containerName="cinder-api-log" Nov 22 10:57:53 crc kubenswrapper[4926]: I1122 10:57:53.813327 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="03538cf9-7502-4102-96b0-4a071e0e2292" containerName="cinder-api-log" Nov 22 10:57:53 crc kubenswrapper[4926]: E1122 10:57:53.813366 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="03538cf9-7502-4102-96b0-4a071e0e2292" containerName="cinder-api" Nov 22 10:57:53 crc kubenswrapper[4926]: I1122 10:57:53.813377 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="03538cf9-7502-4102-96b0-4a071e0e2292" containerName="cinder-api" Nov 22 10:57:53 crc kubenswrapper[4926]: I1122 10:57:53.813589 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="03538cf9-7502-4102-96b0-4a071e0e2292" containerName="cinder-api" Nov 22 10:57:53 crc kubenswrapper[4926]: I1122 10:57:53.813640 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="03538cf9-7502-4102-96b0-4a071e0e2292" containerName="cinder-api-log" Nov 22 10:57:53 crc kubenswrapper[4926]: I1122 10:57:53.814918 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-api-0" Nov 22 10:57:53 crc kubenswrapper[4926]: I1122 10:57:53.817287 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-api-config-data" Nov 22 10:57:53 crc kubenswrapper[4926]: I1122 10:57:53.817499 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-cinder-public-svc" Nov 22 10:57:53 crc kubenswrapper[4926]: I1122 10:57:53.817606 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-cinder-internal-svc" Nov 22 10:57:53 crc kubenswrapper[4926]: I1122 10:57:53.820454 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Nov 22 10:57:53 crc kubenswrapper[4926]: I1122 10:57:53.972217 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8232e2d5-3714-47a4-9739-2e370a17300b-scripts\") pod \"cinder-api-0\" (UID: \"8232e2d5-3714-47a4-9739-2e370a17300b\") " pod="openstack/cinder-api-0" Nov 22 10:57:53 crc kubenswrapper[4926]: I1122 10:57:53.972279 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hz9dd\" (UniqueName: \"kubernetes.io/projected/8232e2d5-3714-47a4-9739-2e370a17300b-kube-api-access-hz9dd\") pod \"cinder-api-0\" (UID: \"8232e2d5-3714-47a4-9739-2e370a17300b\") " pod="openstack/cinder-api-0" Nov 22 10:57:53 crc kubenswrapper[4926]: I1122 10:57:53.972315 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/8232e2d5-3714-47a4-9739-2e370a17300b-internal-tls-certs\") pod \"cinder-api-0\" (UID: \"8232e2d5-3714-47a4-9739-2e370a17300b\") " pod="openstack/cinder-api-0" Nov 22 10:57:53 crc kubenswrapper[4926]: I1122 10:57:53.972371 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8232e2d5-3714-47a4-9739-2e370a17300b-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"8232e2d5-3714-47a4-9739-2e370a17300b\") " pod="openstack/cinder-api-0" Nov 22 10:57:53 crc kubenswrapper[4926]: I1122 10:57:53.972426 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/8232e2d5-3714-47a4-9739-2e370a17300b-etc-machine-id\") pod \"cinder-api-0\" (UID: \"8232e2d5-3714-47a4-9739-2e370a17300b\") " pod="openstack/cinder-api-0" Nov 22 10:57:53 crc kubenswrapper[4926]: I1122 10:57:53.972472 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8232e2d5-3714-47a4-9739-2e370a17300b-config-data\") pod \"cinder-api-0\" (UID: \"8232e2d5-3714-47a4-9739-2e370a17300b\") " pod="openstack/cinder-api-0" Nov 22 10:57:53 crc kubenswrapper[4926]: I1122 10:57:53.972499 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/8232e2d5-3714-47a4-9739-2e370a17300b-public-tls-certs\") pod \"cinder-api-0\" (UID: \"8232e2d5-3714-47a4-9739-2e370a17300b\") " pod="openstack/cinder-api-0" Nov 22 10:57:53 crc kubenswrapper[4926]: I1122 10:57:53.972527 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: 
\"kubernetes.io/empty-dir/8232e2d5-3714-47a4-9739-2e370a17300b-logs\") pod \"cinder-api-0\" (UID: \"8232e2d5-3714-47a4-9739-2e370a17300b\") " pod="openstack/cinder-api-0" Nov 22 10:57:53 crc kubenswrapper[4926]: I1122 10:57:53.972606 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/8232e2d5-3714-47a4-9739-2e370a17300b-config-data-custom\") pod \"cinder-api-0\" (UID: \"8232e2d5-3714-47a4-9739-2e370a17300b\") " pod="openstack/cinder-api-0" Nov 22 10:57:54 crc kubenswrapper[4926]: I1122 10:57:54.064263 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/horizon-86dd5d599b-jndzq" Nov 22 10:57:54 crc kubenswrapper[4926]: I1122 10:57:54.073739 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/8232e2d5-3714-47a4-9739-2e370a17300b-config-data-custom\") pod \"cinder-api-0\" (UID: \"8232e2d5-3714-47a4-9739-2e370a17300b\") " pod="openstack/cinder-api-0" Nov 22 10:57:54 crc kubenswrapper[4926]: I1122 10:57:54.073821 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8232e2d5-3714-47a4-9739-2e370a17300b-scripts\") pod \"cinder-api-0\" (UID: \"8232e2d5-3714-47a4-9739-2e370a17300b\") " pod="openstack/cinder-api-0" Nov 22 10:57:54 crc kubenswrapper[4926]: I1122 10:57:54.073847 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hz9dd\" (UniqueName: \"kubernetes.io/projected/8232e2d5-3714-47a4-9739-2e370a17300b-kube-api-access-hz9dd\") pod \"cinder-api-0\" (UID: \"8232e2d5-3714-47a4-9739-2e370a17300b\") " pod="openstack/cinder-api-0" Nov 22 10:57:54 crc kubenswrapper[4926]: I1122 10:57:54.073870 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/8232e2d5-3714-47a4-9739-2e370a17300b-internal-tls-certs\") pod \"cinder-api-0\" (UID: \"8232e2d5-3714-47a4-9739-2e370a17300b\") " pod="openstack/cinder-api-0" Nov 22 10:57:54 crc kubenswrapper[4926]: I1122 10:57:54.073941 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8232e2d5-3714-47a4-9739-2e370a17300b-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"8232e2d5-3714-47a4-9739-2e370a17300b\") " pod="openstack/cinder-api-0" Nov 22 10:57:54 crc kubenswrapper[4926]: I1122 10:57:54.073992 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/8232e2d5-3714-47a4-9739-2e370a17300b-etc-machine-id\") pod \"cinder-api-0\" (UID: \"8232e2d5-3714-47a4-9739-2e370a17300b\") " pod="openstack/cinder-api-0" Nov 22 10:57:54 crc kubenswrapper[4926]: I1122 10:57:54.074025 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8232e2d5-3714-47a4-9739-2e370a17300b-config-data\") pod \"cinder-api-0\" (UID: \"8232e2d5-3714-47a4-9739-2e370a17300b\") " pod="openstack/cinder-api-0" Nov 22 10:57:54 crc kubenswrapper[4926]: I1122 10:57:54.074044 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/8232e2d5-3714-47a4-9739-2e370a17300b-public-tls-certs\") pod \"cinder-api-0\" (UID: 
\"8232e2d5-3714-47a4-9739-2e370a17300b\") " pod="openstack/cinder-api-0" Nov 22 10:57:54 crc kubenswrapper[4926]: I1122 10:57:54.074065 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8232e2d5-3714-47a4-9739-2e370a17300b-logs\") pod \"cinder-api-0\" (UID: \"8232e2d5-3714-47a4-9739-2e370a17300b\") " pod="openstack/cinder-api-0" Nov 22 10:57:54 crc kubenswrapper[4926]: I1122 10:57:54.074523 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8232e2d5-3714-47a4-9739-2e370a17300b-logs\") pod \"cinder-api-0\" (UID: \"8232e2d5-3714-47a4-9739-2e370a17300b\") " pod="openstack/cinder-api-0" Nov 22 10:57:54 crc kubenswrapper[4926]: I1122 10:57:54.075226 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/8232e2d5-3714-47a4-9739-2e370a17300b-etc-machine-id\") pod \"cinder-api-0\" (UID: \"8232e2d5-3714-47a4-9739-2e370a17300b\") " pod="openstack/cinder-api-0" Nov 22 10:57:54 crc kubenswrapper[4926]: I1122 10:57:54.078142 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/8232e2d5-3714-47a4-9739-2e370a17300b-config-data-custom\") pod \"cinder-api-0\" (UID: \"8232e2d5-3714-47a4-9739-2e370a17300b\") " pod="openstack/cinder-api-0" Nov 22 10:57:54 crc kubenswrapper[4926]: I1122 10:57:54.078187 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8232e2d5-3714-47a4-9739-2e370a17300b-scripts\") pod \"cinder-api-0\" (UID: \"8232e2d5-3714-47a4-9739-2e370a17300b\") " pod="openstack/cinder-api-0" Nov 22 10:57:54 crc kubenswrapper[4926]: I1122 10:57:54.078693 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/8232e2d5-3714-47a4-9739-2e370a17300b-internal-tls-certs\") pod \"cinder-api-0\" (UID: \"8232e2d5-3714-47a4-9739-2e370a17300b\") " pod="openstack/cinder-api-0" Nov 22 10:57:54 crc kubenswrapper[4926]: I1122 10:57:54.079007 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/8232e2d5-3714-47a4-9739-2e370a17300b-public-tls-certs\") pod \"cinder-api-0\" (UID: \"8232e2d5-3714-47a4-9739-2e370a17300b\") " pod="openstack/cinder-api-0" Nov 22 10:57:54 crc kubenswrapper[4926]: I1122 10:57:54.079780 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8232e2d5-3714-47a4-9739-2e370a17300b-config-data\") pod \"cinder-api-0\" (UID: \"8232e2d5-3714-47a4-9739-2e370a17300b\") " pod="openstack/cinder-api-0" Nov 22 10:57:54 crc kubenswrapper[4926]: I1122 10:57:54.080376 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8232e2d5-3714-47a4-9739-2e370a17300b-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"8232e2d5-3714-47a4-9739-2e370a17300b\") " pod="openstack/cinder-api-0" Nov 22 10:57:54 crc kubenswrapper[4926]: I1122 10:57:54.104946 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hz9dd\" (UniqueName: \"kubernetes.io/projected/8232e2d5-3714-47a4-9739-2e370a17300b-kube-api-access-hz9dd\") pod \"cinder-api-0\" (UID: \"8232e2d5-3714-47a4-9739-2e370a17300b\") " pod="openstack/cinder-api-0" Nov 22 10:57:54 crc 
kubenswrapper[4926]: I1122 10:57:54.127173 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-67f69cf99d-5jsdr"] Nov 22 10:57:54 crc kubenswrapper[4926]: I1122 10:57:54.200650 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Nov 22 10:57:54 crc kubenswrapper[4926]: I1122 10:57:54.459240 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/horizon-67f69cf99d-5jsdr" podUID="0566b619-da0e-49ff-b282-3d2bb8ae4fe6" containerName="horizon-log" containerID="cri-o://157148725a567def2b2c6e2ae4298b306aa1e25dd9c7f33941bf227f511bdc78" gracePeriod=30 Nov 22 10:57:54 crc kubenswrapper[4926]: I1122 10:57:54.459246 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/horizon-67f69cf99d-5jsdr" podUID="0566b619-da0e-49ff-b282-3d2bb8ae4fe6" containerName="horizon" containerID="cri-o://98fc8abd427d22d1871572b84594162b7ae30e6ea712de1713f1013870e32014" gracePeriod=30 Nov 22 10:57:54 crc kubenswrapper[4926]: I1122 10:57:54.592357 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="03538cf9-7502-4102-96b0-4a071e0e2292" path="/var/lib/kubelet/pods/03538cf9-7502-4102-96b0-4a071e0e2292/volumes" Nov 22 10:57:54 crc kubenswrapper[4926]: I1122 10:57:54.652553 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Nov 22 10:57:55 crc kubenswrapper[4926]: I1122 10:57:55.478441 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"8232e2d5-3714-47a4-9739-2e370a17300b","Type":"ContainerStarted","Data":"baf5e7ca758c147d3d44a99e41eec059c08cfd37ab382df629c7ededae94aee6"} Nov 22 10:57:55 crc kubenswrapper[4926]: I1122 10:57:55.478760 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"8232e2d5-3714-47a4-9739-2e370a17300b","Type":"ContainerStarted","Data":"68a090a2105f3de1c4077d1e898c1153dd71e02a767d76fb8dee2682977ab987"} Nov 22 10:57:56 crc kubenswrapper[4926]: I1122 10:57:56.496122 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"8232e2d5-3714-47a4-9739-2e370a17300b","Type":"ContainerStarted","Data":"169235c184c9a3013cbeecc03cd33e6b56d81137f2d0d47e8237b1bc2c2a4a2b"} Nov 22 10:57:56 crc kubenswrapper[4926]: I1122 10:57:56.496584 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/cinder-api-0" Nov 22 10:57:56 crc kubenswrapper[4926]: I1122 10:57:56.539950 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-api-0" podStartSLOduration=3.539876789 podStartE2EDuration="3.539876789s" podCreationTimestamp="2025-11-22 10:57:53 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 10:57:56.52384772 +0000 UTC m=+1096.825453047" watchObservedRunningTime="2025-11-22 10:57:56.539876789 +0000 UTC m=+1096.841482126" Nov 22 10:57:56 crc kubenswrapper[4926]: I1122 10:57:56.697931 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/cinder-scheduler-0" Nov 22 10:57:56 crc kubenswrapper[4926]: I1122 10:57:56.783121 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-6bb4fc677f-h86xn" Nov 22 10:57:56 crc kubenswrapper[4926]: I1122 10:57:56.882718 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5ccc5c4795-nkxbv"] Nov 22 
10:57:56 crc kubenswrapper[4926]: I1122 10:57:56.883095 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-5ccc5c4795-nkxbv" podUID="a2f49243-dd53-46b6-9e36-bde6fe8d4e1d" containerName="dnsmasq-dns" containerID="cri-o://bf219715a60455239c8d71aaac3e245a9c448af097cd3b05fa1094f4f2fc2fab" gracePeriod=10 Nov 22 10:57:56 crc kubenswrapper[4926]: I1122 10:57:56.944793 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/cinder-scheduler-0" Nov 22 10:57:56 crc kubenswrapper[4926]: I1122 10:57:56.945343 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-867d4cfd66-ftqtw" Nov 22 10:57:57 crc kubenswrapper[4926]: I1122 10:57:57.144245 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-867d4cfd66-ftqtw" Nov 22 10:57:57 crc kubenswrapper[4926]: I1122 10:57:57.444571 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5ccc5c4795-nkxbv" Nov 22 10:57:57 crc kubenswrapper[4926]: I1122 10:57:57.455765 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a2f49243-dd53-46b6-9e36-bde6fe8d4e1d-config\") pod \"a2f49243-dd53-46b6-9e36-bde6fe8d4e1d\" (UID: \"a2f49243-dd53-46b6-9e36-bde6fe8d4e1d\") " Nov 22 10:57:57 crc kubenswrapper[4926]: I1122 10:57:57.455928 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/a2f49243-dd53-46b6-9e36-bde6fe8d4e1d-ovsdbserver-nb\") pod \"a2f49243-dd53-46b6-9e36-bde6fe8d4e1d\" (UID: \"a2f49243-dd53-46b6-9e36-bde6fe8d4e1d\") " Nov 22 10:57:57 crc kubenswrapper[4926]: I1122 10:57:57.455956 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/a2f49243-dd53-46b6-9e36-bde6fe8d4e1d-ovsdbserver-sb\") pod \"a2f49243-dd53-46b6-9e36-bde6fe8d4e1d\" (UID: \"a2f49243-dd53-46b6-9e36-bde6fe8d4e1d\") " Nov 22 10:57:57 crc kubenswrapper[4926]: I1122 10:57:57.455979 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jsx2t\" (UniqueName: \"kubernetes.io/projected/a2f49243-dd53-46b6-9e36-bde6fe8d4e1d-kube-api-access-jsx2t\") pod \"a2f49243-dd53-46b6-9e36-bde6fe8d4e1d\" (UID: \"a2f49243-dd53-46b6-9e36-bde6fe8d4e1d\") " Nov 22 10:57:57 crc kubenswrapper[4926]: I1122 10:57:57.456017 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/a2f49243-dd53-46b6-9e36-bde6fe8d4e1d-dns-swift-storage-0\") pod \"a2f49243-dd53-46b6-9e36-bde6fe8d4e1d\" (UID: \"a2f49243-dd53-46b6-9e36-bde6fe8d4e1d\") " Nov 22 10:57:57 crc kubenswrapper[4926]: I1122 10:57:57.457584 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/a2f49243-dd53-46b6-9e36-bde6fe8d4e1d-dns-svc\") pod \"a2f49243-dd53-46b6-9e36-bde6fe8d4e1d\" (UID: \"a2f49243-dd53-46b6-9e36-bde6fe8d4e1d\") " Nov 22 10:57:57 crc kubenswrapper[4926]: I1122 10:57:57.487223 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a2f49243-dd53-46b6-9e36-bde6fe8d4e1d-kube-api-access-jsx2t" (OuterVolumeSpecName: "kube-api-access-jsx2t") pod "a2f49243-dd53-46b6-9e36-bde6fe8d4e1d" (UID: 
"a2f49243-dd53-46b6-9e36-bde6fe8d4e1d"). InnerVolumeSpecName "kube-api-access-jsx2t". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:57:57 crc kubenswrapper[4926]: I1122 10:57:57.506988 4926 generic.go:334] "Generic (PLEG): container finished" podID="a2f49243-dd53-46b6-9e36-bde6fe8d4e1d" containerID="bf219715a60455239c8d71aaac3e245a9c448af097cd3b05fa1094f4f2fc2fab" exitCode=0 Nov 22 10:57:57 crc kubenswrapper[4926]: I1122 10:57:57.508300 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5ccc5c4795-nkxbv" Nov 22 10:57:57 crc kubenswrapper[4926]: I1122 10:57:57.508462 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5ccc5c4795-nkxbv" event={"ID":"a2f49243-dd53-46b6-9e36-bde6fe8d4e1d","Type":"ContainerDied","Data":"bf219715a60455239c8d71aaac3e245a9c448af097cd3b05fa1094f4f2fc2fab"} Nov 22 10:57:57 crc kubenswrapper[4926]: I1122 10:57:57.508489 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5ccc5c4795-nkxbv" event={"ID":"a2f49243-dd53-46b6-9e36-bde6fe8d4e1d","Type":"ContainerDied","Data":"689e1796bb8d4eb06ae4606d099a432308d7571b4a10fa5a8da626f09255b6b9"} Nov 22 10:57:57 crc kubenswrapper[4926]: I1122 10:57:57.508504 4926 scope.go:117] "RemoveContainer" containerID="bf219715a60455239c8d71aaac3e245a9c448af097cd3b05fa1094f4f2fc2fab" Nov 22 10:57:57 crc kubenswrapper[4926]: I1122 10:57:57.519938 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a2f49243-dd53-46b6-9e36-bde6fe8d4e1d-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "a2f49243-dd53-46b6-9e36-bde6fe8d4e1d" (UID: "a2f49243-dd53-46b6-9e36-bde6fe8d4e1d"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:57:57 crc kubenswrapper[4926]: I1122 10:57:57.523278 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a2f49243-dd53-46b6-9e36-bde6fe8d4e1d-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "a2f49243-dd53-46b6-9e36-bde6fe8d4e1d" (UID: "a2f49243-dd53-46b6-9e36-bde6fe8d4e1d"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:57:57 crc kubenswrapper[4926]: I1122 10:57:57.528017 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a2f49243-dd53-46b6-9e36-bde6fe8d4e1d-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "a2f49243-dd53-46b6-9e36-bde6fe8d4e1d" (UID: "a2f49243-dd53-46b6-9e36-bde6fe8d4e1d"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:57:57 crc kubenswrapper[4926]: I1122 10:57:57.539975 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a2f49243-dd53-46b6-9e36-bde6fe8d4e1d-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "a2f49243-dd53-46b6-9e36-bde6fe8d4e1d" (UID: "a2f49243-dd53-46b6-9e36-bde6fe8d4e1d"). InnerVolumeSpecName "ovsdbserver-nb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:57:57 crc kubenswrapper[4926]: I1122 10:57:57.564505 4926 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/a2f49243-dd53-46b6-9e36-bde6fe8d4e1d-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Nov 22 10:57:57 crc kubenswrapper[4926]: I1122 10:57:57.564541 4926 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/a2f49243-dd53-46b6-9e36-bde6fe8d4e1d-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 22 10:57:57 crc kubenswrapper[4926]: I1122 10:57:57.564554 4926 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/a2f49243-dd53-46b6-9e36-bde6fe8d4e1d-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 22 10:57:57 crc kubenswrapper[4926]: I1122 10:57:57.564567 4926 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/a2f49243-dd53-46b6-9e36-bde6fe8d4e1d-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 22 10:57:57 crc kubenswrapper[4926]: I1122 10:57:57.564579 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jsx2t\" (UniqueName: \"kubernetes.io/projected/a2f49243-dd53-46b6-9e36-bde6fe8d4e1d-kube-api-access-jsx2t\") on node \"crc\" DevicePath \"\"" Nov 22 10:57:57 crc kubenswrapper[4926]: I1122 10:57:57.574427 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 22 10:57:57 crc kubenswrapper[4926]: I1122 10:57:57.576316 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a2f49243-dd53-46b6-9e36-bde6fe8d4e1d-config" (OuterVolumeSpecName: "config") pod "a2f49243-dd53-46b6-9e36-bde6fe8d4e1d" (UID: "a2f49243-dd53-46b6-9e36-bde6fe8d4e1d"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:57:57 crc kubenswrapper[4926]: I1122 10:57:57.618795 4926 scope.go:117] "RemoveContainer" containerID="784732c3115f04bf2e95422ed991d3dcbf5a07b08b16d274acd9c4e742fe8da4" Nov 22 10:57:57 crc kubenswrapper[4926]: I1122 10:57:57.650101 4926 scope.go:117] "RemoveContainer" containerID="bf219715a60455239c8d71aaac3e245a9c448af097cd3b05fa1094f4f2fc2fab" Nov 22 10:57:57 crc kubenswrapper[4926]: E1122 10:57:57.656152 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"bf219715a60455239c8d71aaac3e245a9c448af097cd3b05fa1094f4f2fc2fab\": container with ID starting with bf219715a60455239c8d71aaac3e245a9c448af097cd3b05fa1094f4f2fc2fab not found: ID does not exist" containerID="bf219715a60455239c8d71aaac3e245a9c448af097cd3b05fa1094f4f2fc2fab" Nov 22 10:57:57 crc kubenswrapper[4926]: I1122 10:57:57.656404 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bf219715a60455239c8d71aaac3e245a9c448af097cd3b05fa1094f4f2fc2fab"} err="failed to get container status \"bf219715a60455239c8d71aaac3e245a9c448af097cd3b05fa1094f4f2fc2fab\": rpc error: code = NotFound desc = could not find container \"bf219715a60455239c8d71aaac3e245a9c448af097cd3b05fa1094f4f2fc2fab\": container with ID starting with bf219715a60455239c8d71aaac3e245a9c448af097cd3b05fa1094f4f2fc2fab not found: ID does not exist" Nov 22 10:57:57 crc kubenswrapper[4926]: I1122 10:57:57.656541 4926 scope.go:117] "RemoveContainer" containerID="784732c3115f04bf2e95422ed991d3dcbf5a07b08b16d274acd9c4e742fe8da4" Nov 22 10:57:57 crc kubenswrapper[4926]: E1122 10:57:57.657095 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"784732c3115f04bf2e95422ed991d3dcbf5a07b08b16d274acd9c4e742fe8da4\": container with ID starting with 784732c3115f04bf2e95422ed991d3dcbf5a07b08b16d274acd9c4e742fe8da4 not found: ID does not exist" containerID="784732c3115f04bf2e95422ed991d3dcbf5a07b08b16d274acd9c4e742fe8da4" Nov 22 10:57:57 crc kubenswrapper[4926]: I1122 10:57:57.657148 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"784732c3115f04bf2e95422ed991d3dcbf5a07b08b16d274acd9c4e742fe8da4"} err="failed to get container status \"784732c3115f04bf2e95422ed991d3dcbf5a07b08b16d274acd9c4e742fe8da4\": rpc error: code = NotFound desc = could not find container \"784732c3115f04bf2e95422ed991d3dcbf5a07b08b16d274acd9c4e742fe8da4\": container with ID starting with 784732c3115f04bf2e95422ed991d3dcbf5a07b08b16d274acd9c4e742fe8da4 not found: ID does not exist" Nov 22 10:57:57 crc kubenswrapper[4926]: I1122 10:57:57.666705 4926 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a2f49243-dd53-46b6-9e36-bde6fe8d4e1d-config\") on node \"crc\" DevicePath \"\"" Nov 22 10:57:57 crc kubenswrapper[4926]: I1122 10:57:57.851815 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5ccc5c4795-nkxbv"] Nov 22 10:57:57 crc kubenswrapper[4926]: I1122 10:57:57.860768 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-5ccc5c4795-nkxbv"] Nov 22 10:57:58 crc kubenswrapper[4926]: I1122 10:57:58.036993 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/neutron-64c985686b-kqzqh" Nov 22 10:57:58 crc kubenswrapper[4926]: I1122 10:57:58.052386 4926 kubelet.go:2542] 
"SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/neutron-64c985686b-kqzqh" Nov 22 10:57:58 crc kubenswrapper[4926]: I1122 10:57:58.075694 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/horizon-67f69cf99d-5jsdr" podUID="0566b619-da0e-49ff-b282-3d2bb8ae4fe6" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.149:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.149:8443: connect: connection refused" Nov 22 10:57:58 crc kubenswrapper[4926]: I1122 10:57:58.519365 4926 generic.go:334] "Generic (PLEG): container finished" podID="0566b619-da0e-49ff-b282-3d2bb8ae4fe6" containerID="98fc8abd427d22d1871572b84594162b7ae30e6ea712de1713f1013870e32014" exitCode=0 Nov 22 10:57:58 crc kubenswrapper[4926]: I1122 10:57:58.519434 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-67f69cf99d-5jsdr" event={"ID":"0566b619-da0e-49ff-b282-3d2bb8ae4fe6","Type":"ContainerDied","Data":"98fc8abd427d22d1871572b84594162b7ae30e6ea712de1713f1013870e32014"} Nov 22 10:57:58 crc kubenswrapper[4926]: I1122 10:57:58.519931 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-scheduler-0" podUID="684951ad-f4cf-4fb2-83d6-5ce19fea1719" containerName="cinder-scheduler" containerID="cri-o://d644a2b72e6965c29f8677c53eb1fde30d0936ad857c292aeee5c0d754905a3c" gracePeriod=30 Nov 22 10:57:58 crc kubenswrapper[4926]: I1122 10:57:58.520025 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-scheduler-0" podUID="684951ad-f4cf-4fb2-83d6-5ce19fea1719" containerName="probe" containerID="cri-o://7f95f216ed43bc6facdd0d7c8a829a47795cbf662d9dad96aa2be1e0492ca903" gracePeriod=30 Nov 22 10:57:58 crc kubenswrapper[4926]: I1122 10:57:58.590727 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a2f49243-dd53-46b6-9e36-bde6fe8d4e1d" path="/var/lib/kubelet/pods/a2f49243-dd53-46b6-9e36-bde6fe8d4e1d/volumes" Nov 22 10:57:59 crc kubenswrapper[4926]: I1122 10:57:59.442828 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/placement-65b67ff7d-d2fkp" Nov 22 10:57:59 crc kubenswrapper[4926]: I1122 10:57:59.483326 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/placement-65b67ff7d-d2fkp" Nov 22 10:57:59 crc kubenswrapper[4926]: I1122 10:57:59.549765 4926 generic.go:334] "Generic (PLEG): container finished" podID="684951ad-f4cf-4fb2-83d6-5ce19fea1719" containerID="7f95f216ed43bc6facdd0d7c8a829a47795cbf662d9dad96aa2be1e0492ca903" exitCode=0 Nov 22 10:57:59 crc kubenswrapper[4926]: I1122 10:57:59.549837 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"684951ad-f4cf-4fb2-83d6-5ce19fea1719","Type":"ContainerDied","Data":"7f95f216ed43bc6facdd0d7c8a829a47795cbf662d9dad96aa2be1e0492ca903"} Nov 22 10:57:59 crc kubenswrapper[4926]: I1122 10:57:59.751209 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/keystone-68769dd845-84s2z" Nov 22 10:58:00 crc kubenswrapper[4926]: I1122 10:58:00.001949 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/neutron-5f7c4dcf85-jl8kd" Nov 22 10:58:00 crc kubenswrapper[4926]: I1122 10:58:00.108252 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-64c985686b-kqzqh"] Nov 22 10:58:00 crc kubenswrapper[4926]: I1122 10:58:00.108516 4926 kuberuntime_container.go:808] "Killing container with a 
grace period" pod="openstack/neutron-64c985686b-kqzqh" podUID="dc8c34a7-9a8c-4ae9-afb4-eebee493f602" containerName="neutron-api" containerID="cri-o://9f6e38673b72f96eacdc348f5ec4654dfb34a31ecfafba67822abe86d2cad186" gracePeriod=30 Nov 22 10:58:00 crc kubenswrapper[4926]: I1122 10:58:00.108626 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/neutron-64c985686b-kqzqh" podUID="dc8c34a7-9a8c-4ae9-afb4-eebee493f602" containerName="neutron-httpd" containerID="cri-o://ab491699e4c12708fd67dba8ac5c6be86603d3691c9798faec0c28312c58cca5" gracePeriod=30 Nov 22 10:58:00 crc kubenswrapper[4926]: I1122 10:58:00.559739 4926 generic.go:334] "Generic (PLEG): container finished" podID="dc8c34a7-9a8c-4ae9-afb4-eebee493f602" containerID="ab491699e4c12708fd67dba8ac5c6be86603d3691c9798faec0c28312c58cca5" exitCode=0 Nov 22 10:58:00 crc kubenswrapper[4926]: I1122 10:58:00.559783 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-64c985686b-kqzqh" event={"ID":"dc8c34a7-9a8c-4ae9-afb4-eebee493f602","Type":"ContainerDied","Data":"ab491699e4c12708fd67dba8ac5c6be86603d3691c9798faec0c28312c58cca5"} Nov 22 10:58:02 crc kubenswrapper[4926]: I1122 10:58:02.593575 4926 generic.go:334] "Generic (PLEG): container finished" podID="684951ad-f4cf-4fb2-83d6-5ce19fea1719" containerID="d644a2b72e6965c29f8677c53eb1fde30d0936ad857c292aeee5c0d754905a3c" exitCode=0 Nov 22 10:58:02 crc kubenswrapper[4926]: I1122 10:58:02.596130 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"684951ad-f4cf-4fb2-83d6-5ce19fea1719","Type":"ContainerDied","Data":"d644a2b72e6965c29f8677c53eb1fde30d0936ad857c292aeee5c0d754905a3c"} Nov 22 10:58:02 crc kubenswrapper[4926]: I1122 10:58:02.596169 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"684951ad-f4cf-4fb2-83d6-5ce19fea1719","Type":"ContainerDied","Data":"1b90e31e27a74c1618d1309850fa547d8fdc41dc351202d24194183479c7d794"} Nov 22 10:58:02 crc kubenswrapper[4926]: I1122 10:58:02.596182 4926 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="1b90e31e27a74c1618d1309850fa547d8fdc41dc351202d24194183479c7d794" Nov 22 10:58:02 crc kubenswrapper[4926]: I1122 10:58:02.658513 4926 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-scheduler-0" Nov 22 10:58:02 crc kubenswrapper[4926]: I1122 10:58:02.678866 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/684951ad-f4cf-4fb2-83d6-5ce19fea1719-scripts\") pod \"684951ad-f4cf-4fb2-83d6-5ce19fea1719\" (UID: \"684951ad-f4cf-4fb2-83d6-5ce19fea1719\") " Nov 22 10:58:02 crc kubenswrapper[4926]: I1122 10:58:02.679067 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/684951ad-f4cf-4fb2-83d6-5ce19fea1719-config-data\") pod \"684951ad-f4cf-4fb2-83d6-5ce19fea1719\" (UID: \"684951ad-f4cf-4fb2-83d6-5ce19fea1719\") " Nov 22 10:58:02 crc kubenswrapper[4926]: I1122 10:58:02.679093 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qd245\" (UniqueName: \"kubernetes.io/projected/684951ad-f4cf-4fb2-83d6-5ce19fea1719-kube-api-access-qd245\") pod \"684951ad-f4cf-4fb2-83d6-5ce19fea1719\" (UID: \"684951ad-f4cf-4fb2-83d6-5ce19fea1719\") " Nov 22 10:58:02 crc kubenswrapper[4926]: I1122 10:58:02.679133 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/684951ad-f4cf-4fb2-83d6-5ce19fea1719-config-data-custom\") pod \"684951ad-f4cf-4fb2-83d6-5ce19fea1719\" (UID: \"684951ad-f4cf-4fb2-83d6-5ce19fea1719\") " Nov 22 10:58:02 crc kubenswrapper[4926]: I1122 10:58:02.679151 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/684951ad-f4cf-4fb2-83d6-5ce19fea1719-combined-ca-bundle\") pod \"684951ad-f4cf-4fb2-83d6-5ce19fea1719\" (UID: \"684951ad-f4cf-4fb2-83d6-5ce19fea1719\") " Nov 22 10:58:02 crc kubenswrapper[4926]: I1122 10:58:02.679201 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/684951ad-f4cf-4fb2-83d6-5ce19fea1719-etc-machine-id\") pod \"684951ad-f4cf-4fb2-83d6-5ce19fea1719\" (UID: \"684951ad-f4cf-4fb2-83d6-5ce19fea1719\") " Nov 22 10:58:02 crc kubenswrapper[4926]: I1122 10:58:02.682436 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/684951ad-f4cf-4fb2-83d6-5ce19fea1719-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "684951ad-f4cf-4fb2-83d6-5ce19fea1719" (UID: "684951ad-f4cf-4fb2-83d6-5ce19fea1719"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 22 10:58:02 crc kubenswrapper[4926]: I1122 10:58:02.686143 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/684951ad-f4cf-4fb2-83d6-5ce19fea1719-kube-api-access-qd245" (OuterVolumeSpecName: "kube-api-access-qd245") pod "684951ad-f4cf-4fb2-83d6-5ce19fea1719" (UID: "684951ad-f4cf-4fb2-83d6-5ce19fea1719"). InnerVolumeSpecName "kube-api-access-qd245". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:58:02 crc kubenswrapper[4926]: I1122 10:58:02.687082 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/684951ad-f4cf-4fb2-83d6-5ce19fea1719-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "684951ad-f4cf-4fb2-83d6-5ce19fea1719" (UID: "684951ad-f4cf-4fb2-83d6-5ce19fea1719"). InnerVolumeSpecName "config-data-custom". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:58:02 crc kubenswrapper[4926]: I1122 10:58:02.704142 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/684951ad-f4cf-4fb2-83d6-5ce19fea1719-scripts" (OuterVolumeSpecName: "scripts") pod "684951ad-f4cf-4fb2-83d6-5ce19fea1719" (UID: "684951ad-f4cf-4fb2-83d6-5ce19fea1719"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:58:02 crc kubenswrapper[4926]: I1122 10:58:02.771435 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/684951ad-f4cf-4fb2-83d6-5ce19fea1719-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "684951ad-f4cf-4fb2-83d6-5ce19fea1719" (UID: "684951ad-f4cf-4fb2-83d6-5ce19fea1719"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:58:02 crc kubenswrapper[4926]: I1122 10:58:02.780842 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qd245\" (UniqueName: \"kubernetes.io/projected/684951ad-f4cf-4fb2-83d6-5ce19fea1719-kube-api-access-qd245\") on node \"crc\" DevicePath \"\"" Nov 22 10:58:02 crc kubenswrapper[4926]: I1122 10:58:02.780880 4926 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/684951ad-f4cf-4fb2-83d6-5ce19fea1719-config-data-custom\") on node \"crc\" DevicePath \"\"" Nov 22 10:58:02 crc kubenswrapper[4926]: I1122 10:58:02.780906 4926 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/684951ad-f4cf-4fb2-83d6-5ce19fea1719-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 22 10:58:02 crc kubenswrapper[4926]: I1122 10:58:02.780919 4926 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/684951ad-f4cf-4fb2-83d6-5ce19fea1719-etc-machine-id\") on node \"crc\" DevicePath \"\"" Nov 22 10:58:02 crc kubenswrapper[4926]: I1122 10:58:02.781030 4926 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/684951ad-f4cf-4fb2-83d6-5ce19fea1719-scripts\") on node \"crc\" DevicePath \"\"" Nov 22 10:58:02 crc kubenswrapper[4926]: I1122 10:58:02.796369 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/684951ad-f4cf-4fb2-83d6-5ce19fea1719-config-data" (OuterVolumeSpecName: "config-data") pod "684951ad-f4cf-4fb2-83d6-5ce19fea1719" (UID: "684951ad-f4cf-4fb2-83d6-5ce19fea1719"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:58:02 crc kubenswrapper[4926]: I1122 10:58:02.883201 4926 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/684951ad-f4cf-4fb2-83d6-5ce19fea1719-config-data\") on node \"crc\" DevicePath \"\"" Nov 22 10:58:03 crc kubenswrapper[4926]: I1122 10:58:03.601972 4926 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-scheduler-0" Nov 22 10:58:03 crc kubenswrapper[4926]: I1122 10:58:03.641632 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 22 10:58:03 crc kubenswrapper[4926]: I1122 10:58:03.642946 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-d84d6cb4b-w4kcl" Nov 22 10:58:03 crc kubenswrapper[4926]: I1122 10:58:03.653674 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 22 10:58:03 crc kubenswrapper[4926]: I1122 10:58:03.671238 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-d84d6cb4b-w4kcl" Nov 22 10:58:03 crc kubenswrapper[4926]: I1122 10:58:03.671618 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-scheduler-0"] Nov 22 10:58:03 crc kubenswrapper[4926]: E1122 10:58:03.672033 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="684951ad-f4cf-4fb2-83d6-5ce19fea1719" containerName="probe" Nov 22 10:58:03 crc kubenswrapper[4926]: I1122 10:58:03.672056 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="684951ad-f4cf-4fb2-83d6-5ce19fea1719" containerName="probe" Nov 22 10:58:03 crc kubenswrapper[4926]: E1122 10:58:03.672083 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a2f49243-dd53-46b6-9e36-bde6fe8d4e1d" containerName="dnsmasq-dns" Nov 22 10:58:03 crc kubenswrapper[4926]: I1122 10:58:03.672092 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="a2f49243-dd53-46b6-9e36-bde6fe8d4e1d" containerName="dnsmasq-dns" Nov 22 10:58:03 crc kubenswrapper[4926]: E1122 10:58:03.672109 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="684951ad-f4cf-4fb2-83d6-5ce19fea1719" containerName="cinder-scheduler" Nov 22 10:58:03 crc kubenswrapper[4926]: I1122 10:58:03.672116 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="684951ad-f4cf-4fb2-83d6-5ce19fea1719" containerName="cinder-scheduler" Nov 22 10:58:03 crc kubenswrapper[4926]: E1122 10:58:03.672137 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a2f49243-dd53-46b6-9e36-bde6fe8d4e1d" containerName="init" Nov 22 10:58:03 crc kubenswrapper[4926]: I1122 10:58:03.672145 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="a2f49243-dd53-46b6-9e36-bde6fe8d4e1d" containerName="init" Nov 22 10:58:03 crc kubenswrapper[4926]: I1122 10:58:03.672376 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="a2f49243-dd53-46b6-9e36-bde6fe8d4e1d" containerName="dnsmasq-dns" Nov 22 10:58:03 crc kubenswrapper[4926]: I1122 10:58:03.672392 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="684951ad-f4cf-4fb2-83d6-5ce19fea1719" containerName="probe" Nov 22 10:58:03 crc kubenswrapper[4926]: I1122 10:58:03.672407 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="684951ad-f4cf-4fb2-83d6-5ce19fea1719" containerName="cinder-scheduler" Nov 22 10:58:03 crc kubenswrapper[4926]: I1122 10:58:03.673563 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-scheduler-0" Nov 22 10:58:03 crc kubenswrapper[4926]: I1122 10:58:03.675388 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scheduler-config-data" Nov 22 10:58:03 crc kubenswrapper[4926]: I1122 10:58:03.707371 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-67lc8\" (UniqueName: \"kubernetes.io/projected/e8c6c748-bba9-4298-b0de-745cd26ccec4-kube-api-access-67lc8\") pod \"cinder-scheduler-0\" (UID: \"e8c6c748-bba9-4298-b0de-745cd26ccec4\") " pod="openstack/cinder-scheduler-0" Nov 22 10:58:03 crc kubenswrapper[4926]: I1122 10:58:03.707486 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e8c6c748-bba9-4298-b0de-745cd26ccec4-config-data\") pod \"cinder-scheduler-0\" (UID: \"e8c6c748-bba9-4298-b0de-745cd26ccec4\") " pod="openstack/cinder-scheduler-0" Nov 22 10:58:03 crc kubenswrapper[4926]: I1122 10:58:03.707514 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/e8c6c748-bba9-4298-b0de-745cd26ccec4-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"e8c6c748-bba9-4298-b0de-745cd26ccec4\") " pod="openstack/cinder-scheduler-0" Nov 22 10:58:03 crc kubenswrapper[4926]: I1122 10:58:03.707534 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/e8c6c748-bba9-4298-b0de-745cd26ccec4-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"e8c6c748-bba9-4298-b0de-745cd26ccec4\") " pod="openstack/cinder-scheduler-0" Nov 22 10:58:03 crc kubenswrapper[4926]: I1122 10:58:03.707610 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e8c6c748-bba9-4298-b0de-745cd26ccec4-scripts\") pod \"cinder-scheduler-0\" (UID: \"e8c6c748-bba9-4298-b0de-745cd26ccec4\") " pod="openstack/cinder-scheduler-0" Nov 22 10:58:03 crc kubenswrapper[4926]: I1122 10:58:03.707723 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e8c6c748-bba9-4298-b0de-745cd26ccec4-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"e8c6c748-bba9-4298-b0de-745cd26ccec4\") " pod="openstack/cinder-scheduler-0" Nov 22 10:58:03 crc kubenswrapper[4926]: I1122 10:58:03.740011 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 22 10:58:03 crc kubenswrapper[4926]: I1122 10:58:03.775203 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-api-867d4cfd66-ftqtw"] Nov 22 10:58:03 crc kubenswrapper[4926]: I1122 10:58:03.775452 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-api-867d4cfd66-ftqtw" podUID="b85c12f2-c589-476e-94b3-cc7650c154eb" containerName="barbican-api-log" containerID="cri-o://cf7a06d1847cf43a40fb2690b3fc64105778392bc0f8287587552bfe07883185" gracePeriod=30 Nov 22 10:58:03 crc kubenswrapper[4926]: I1122 10:58:03.775581 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-api-867d4cfd66-ftqtw" podUID="b85c12f2-c589-476e-94b3-cc7650c154eb" containerName="barbican-api" 
containerID="cri-o://7d58570f8b7f83376ced816694bf451d8f04d6248b463f21a024cc08d83b574e" gracePeriod=30 Nov 22 10:58:03 crc kubenswrapper[4926]: I1122 10:58:03.809491 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e8c6c748-bba9-4298-b0de-745cd26ccec4-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"e8c6c748-bba9-4298-b0de-745cd26ccec4\") " pod="openstack/cinder-scheduler-0" Nov 22 10:58:03 crc kubenswrapper[4926]: I1122 10:58:03.809540 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-67lc8\" (UniqueName: \"kubernetes.io/projected/e8c6c748-bba9-4298-b0de-745cd26ccec4-kube-api-access-67lc8\") pod \"cinder-scheduler-0\" (UID: \"e8c6c748-bba9-4298-b0de-745cd26ccec4\") " pod="openstack/cinder-scheduler-0" Nov 22 10:58:03 crc kubenswrapper[4926]: I1122 10:58:03.809590 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e8c6c748-bba9-4298-b0de-745cd26ccec4-config-data\") pod \"cinder-scheduler-0\" (UID: \"e8c6c748-bba9-4298-b0de-745cd26ccec4\") " pod="openstack/cinder-scheduler-0" Nov 22 10:58:03 crc kubenswrapper[4926]: I1122 10:58:03.809607 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/e8c6c748-bba9-4298-b0de-745cd26ccec4-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"e8c6c748-bba9-4298-b0de-745cd26ccec4\") " pod="openstack/cinder-scheduler-0" Nov 22 10:58:03 crc kubenswrapper[4926]: I1122 10:58:03.809623 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/e8c6c748-bba9-4298-b0de-745cd26ccec4-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"e8c6c748-bba9-4298-b0de-745cd26ccec4\") " pod="openstack/cinder-scheduler-0" Nov 22 10:58:03 crc kubenswrapper[4926]: I1122 10:58:03.809679 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e8c6c748-bba9-4298-b0de-745cd26ccec4-scripts\") pod \"cinder-scheduler-0\" (UID: \"e8c6c748-bba9-4298-b0de-745cd26ccec4\") " pod="openstack/cinder-scheduler-0" Nov 22 10:58:03 crc kubenswrapper[4926]: I1122 10:58:03.811227 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/e8c6c748-bba9-4298-b0de-745cd26ccec4-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"e8c6c748-bba9-4298-b0de-745cd26ccec4\") " pod="openstack/cinder-scheduler-0" Nov 22 10:58:03 crc kubenswrapper[4926]: I1122 10:58:03.823804 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/e8c6c748-bba9-4298-b0de-745cd26ccec4-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"e8c6c748-bba9-4298-b0de-745cd26ccec4\") " pod="openstack/cinder-scheduler-0" Nov 22 10:58:03 crc kubenswrapper[4926]: I1122 10:58:03.823813 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e8c6c748-bba9-4298-b0de-745cd26ccec4-scripts\") pod \"cinder-scheduler-0\" (UID: \"e8c6c748-bba9-4298-b0de-745cd26ccec4\") " pod="openstack/cinder-scheduler-0" Nov 22 10:58:03 crc kubenswrapper[4926]: I1122 10:58:03.824221 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: 
\"kubernetes.io/secret/e8c6c748-bba9-4298-b0de-745cd26ccec4-config-data\") pod \"cinder-scheduler-0\" (UID: \"e8c6c748-bba9-4298-b0de-745cd26ccec4\") " pod="openstack/cinder-scheduler-0" Nov 22 10:58:03 crc kubenswrapper[4926]: I1122 10:58:03.830598 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e8c6c748-bba9-4298-b0de-745cd26ccec4-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"e8c6c748-bba9-4298-b0de-745cd26ccec4\") " pod="openstack/cinder-scheduler-0" Nov 22 10:58:03 crc kubenswrapper[4926]: I1122 10:58:03.836003 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-67lc8\" (UniqueName: \"kubernetes.io/projected/e8c6c748-bba9-4298-b0de-745cd26ccec4-kube-api-access-67lc8\") pod \"cinder-scheduler-0\" (UID: \"e8c6c748-bba9-4298-b0de-745cd26ccec4\") " pod="openstack/cinder-scheduler-0" Nov 22 10:58:03 crc kubenswrapper[4926]: I1122 10:58:03.994438 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Nov 22 10:58:04 crc kubenswrapper[4926]: I1122 10:58:04.102508 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/swift-proxy-74969bfb89-zx2cm"] Nov 22 10:58:04 crc kubenswrapper[4926]: I1122 10:58:04.104395 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-proxy-74969bfb89-zx2cm" Nov 22 10:58:04 crc kubenswrapper[4926]: I1122 10:58:04.107399 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-swift-internal-svc" Nov 22 10:58:04 crc kubenswrapper[4926]: I1122 10:58:04.107680 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-proxy-config-data" Nov 22 10:58:04 crc kubenswrapper[4926]: I1122 10:58:04.107823 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-swift-public-svc" Nov 22 10:58:04 crc kubenswrapper[4926]: I1122 10:58:04.114658 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-s9zkm\" (UniqueName: \"kubernetes.io/projected/033cb6e2-4f4b-46e3-a28f-61f904e65d4b-kube-api-access-s9zkm\") pod \"swift-proxy-74969bfb89-zx2cm\" (UID: \"033cb6e2-4f4b-46e3-a28f-61f904e65d4b\") " pod="openstack/swift-proxy-74969bfb89-zx2cm" Nov 22 10:58:04 crc kubenswrapper[4926]: I1122 10:58:04.114704 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/033cb6e2-4f4b-46e3-a28f-61f904e65d4b-public-tls-certs\") pod \"swift-proxy-74969bfb89-zx2cm\" (UID: \"033cb6e2-4f4b-46e3-a28f-61f904e65d4b\") " pod="openstack/swift-proxy-74969bfb89-zx2cm" Nov 22 10:58:04 crc kubenswrapper[4926]: I1122 10:58:04.114753 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/033cb6e2-4f4b-46e3-a28f-61f904e65d4b-combined-ca-bundle\") pod \"swift-proxy-74969bfb89-zx2cm\" (UID: \"033cb6e2-4f4b-46e3-a28f-61f904e65d4b\") " pod="openstack/swift-proxy-74969bfb89-zx2cm" Nov 22 10:58:04 crc kubenswrapper[4926]: I1122 10:58:04.114796 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/033cb6e2-4f4b-46e3-a28f-61f904e65d4b-config-data\") pod \"swift-proxy-74969bfb89-zx2cm\" (UID: \"033cb6e2-4f4b-46e3-a28f-61f904e65d4b\") " 
pod="openstack/swift-proxy-74969bfb89-zx2cm" Nov 22 10:58:04 crc kubenswrapper[4926]: I1122 10:58:04.114816 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/033cb6e2-4f4b-46e3-a28f-61f904e65d4b-etc-swift\") pod \"swift-proxy-74969bfb89-zx2cm\" (UID: \"033cb6e2-4f4b-46e3-a28f-61f904e65d4b\") " pod="openstack/swift-proxy-74969bfb89-zx2cm" Nov 22 10:58:04 crc kubenswrapper[4926]: I1122 10:58:04.114865 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/033cb6e2-4f4b-46e3-a28f-61f904e65d4b-internal-tls-certs\") pod \"swift-proxy-74969bfb89-zx2cm\" (UID: \"033cb6e2-4f4b-46e3-a28f-61f904e65d4b\") " pod="openstack/swift-proxy-74969bfb89-zx2cm" Nov 22 10:58:04 crc kubenswrapper[4926]: I1122 10:58:04.114901 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/033cb6e2-4f4b-46e3-a28f-61f904e65d4b-run-httpd\") pod \"swift-proxy-74969bfb89-zx2cm\" (UID: \"033cb6e2-4f4b-46e3-a28f-61f904e65d4b\") " pod="openstack/swift-proxy-74969bfb89-zx2cm" Nov 22 10:58:04 crc kubenswrapper[4926]: I1122 10:58:04.114927 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/033cb6e2-4f4b-46e3-a28f-61f904e65d4b-log-httpd\") pod \"swift-proxy-74969bfb89-zx2cm\" (UID: \"033cb6e2-4f4b-46e3-a28f-61f904e65d4b\") " pod="openstack/swift-proxy-74969bfb89-zx2cm" Nov 22 10:58:04 crc kubenswrapper[4926]: I1122 10:58:04.129835 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-proxy-74969bfb89-zx2cm"] Nov 22 10:58:04 crc kubenswrapper[4926]: I1122 10:58:04.226097 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/033cb6e2-4f4b-46e3-a28f-61f904e65d4b-internal-tls-certs\") pod \"swift-proxy-74969bfb89-zx2cm\" (UID: \"033cb6e2-4f4b-46e3-a28f-61f904e65d4b\") " pod="openstack/swift-proxy-74969bfb89-zx2cm" Nov 22 10:58:04 crc kubenswrapper[4926]: I1122 10:58:04.226463 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/033cb6e2-4f4b-46e3-a28f-61f904e65d4b-run-httpd\") pod \"swift-proxy-74969bfb89-zx2cm\" (UID: \"033cb6e2-4f4b-46e3-a28f-61f904e65d4b\") " pod="openstack/swift-proxy-74969bfb89-zx2cm" Nov 22 10:58:04 crc kubenswrapper[4926]: I1122 10:58:04.226484 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/033cb6e2-4f4b-46e3-a28f-61f904e65d4b-log-httpd\") pod \"swift-proxy-74969bfb89-zx2cm\" (UID: \"033cb6e2-4f4b-46e3-a28f-61f904e65d4b\") " pod="openstack/swift-proxy-74969bfb89-zx2cm" Nov 22 10:58:04 crc kubenswrapper[4926]: I1122 10:58:04.226515 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s9zkm\" (UniqueName: \"kubernetes.io/projected/033cb6e2-4f4b-46e3-a28f-61f904e65d4b-kube-api-access-s9zkm\") pod \"swift-proxy-74969bfb89-zx2cm\" (UID: \"033cb6e2-4f4b-46e3-a28f-61f904e65d4b\") " pod="openstack/swift-proxy-74969bfb89-zx2cm" Nov 22 10:58:04 crc kubenswrapper[4926]: I1122 10:58:04.226541 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: 
\"kubernetes.io/secret/033cb6e2-4f4b-46e3-a28f-61f904e65d4b-public-tls-certs\") pod \"swift-proxy-74969bfb89-zx2cm\" (UID: \"033cb6e2-4f4b-46e3-a28f-61f904e65d4b\") " pod="openstack/swift-proxy-74969bfb89-zx2cm" Nov 22 10:58:04 crc kubenswrapper[4926]: I1122 10:58:04.226596 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/033cb6e2-4f4b-46e3-a28f-61f904e65d4b-combined-ca-bundle\") pod \"swift-proxy-74969bfb89-zx2cm\" (UID: \"033cb6e2-4f4b-46e3-a28f-61f904e65d4b\") " pod="openstack/swift-proxy-74969bfb89-zx2cm" Nov 22 10:58:04 crc kubenswrapper[4926]: I1122 10:58:04.226644 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/033cb6e2-4f4b-46e3-a28f-61f904e65d4b-config-data\") pod \"swift-proxy-74969bfb89-zx2cm\" (UID: \"033cb6e2-4f4b-46e3-a28f-61f904e65d4b\") " pod="openstack/swift-proxy-74969bfb89-zx2cm" Nov 22 10:58:04 crc kubenswrapper[4926]: I1122 10:58:04.226665 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/033cb6e2-4f4b-46e3-a28f-61f904e65d4b-etc-swift\") pod \"swift-proxy-74969bfb89-zx2cm\" (UID: \"033cb6e2-4f4b-46e3-a28f-61f904e65d4b\") " pod="openstack/swift-proxy-74969bfb89-zx2cm" Nov 22 10:58:04 crc kubenswrapper[4926]: I1122 10:58:04.228523 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/033cb6e2-4f4b-46e3-a28f-61f904e65d4b-log-httpd\") pod \"swift-proxy-74969bfb89-zx2cm\" (UID: \"033cb6e2-4f4b-46e3-a28f-61f904e65d4b\") " pod="openstack/swift-proxy-74969bfb89-zx2cm" Nov 22 10:58:04 crc kubenswrapper[4926]: I1122 10:58:04.228844 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/033cb6e2-4f4b-46e3-a28f-61f904e65d4b-run-httpd\") pod \"swift-proxy-74969bfb89-zx2cm\" (UID: \"033cb6e2-4f4b-46e3-a28f-61f904e65d4b\") " pod="openstack/swift-proxy-74969bfb89-zx2cm" Nov 22 10:58:04 crc kubenswrapper[4926]: I1122 10:58:04.234198 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/033cb6e2-4f4b-46e3-a28f-61f904e65d4b-etc-swift\") pod \"swift-proxy-74969bfb89-zx2cm\" (UID: \"033cb6e2-4f4b-46e3-a28f-61f904e65d4b\") " pod="openstack/swift-proxy-74969bfb89-zx2cm" Nov 22 10:58:04 crc kubenswrapper[4926]: I1122 10:58:04.237596 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/033cb6e2-4f4b-46e3-a28f-61f904e65d4b-internal-tls-certs\") pod \"swift-proxy-74969bfb89-zx2cm\" (UID: \"033cb6e2-4f4b-46e3-a28f-61f904e65d4b\") " pod="openstack/swift-proxy-74969bfb89-zx2cm" Nov 22 10:58:04 crc kubenswrapper[4926]: I1122 10:58:04.238487 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/033cb6e2-4f4b-46e3-a28f-61f904e65d4b-public-tls-certs\") pod \"swift-proxy-74969bfb89-zx2cm\" (UID: \"033cb6e2-4f4b-46e3-a28f-61f904e65d4b\") " pod="openstack/swift-proxy-74969bfb89-zx2cm" Nov 22 10:58:04 crc kubenswrapper[4926]: I1122 10:58:04.259788 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/033cb6e2-4f4b-46e3-a28f-61f904e65d4b-combined-ca-bundle\") pod \"swift-proxy-74969bfb89-zx2cm\" (UID: 
\"033cb6e2-4f4b-46e3-a28f-61f904e65d4b\") " pod="openstack/swift-proxy-74969bfb89-zx2cm" Nov 22 10:58:04 crc kubenswrapper[4926]: I1122 10:58:04.260065 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s9zkm\" (UniqueName: \"kubernetes.io/projected/033cb6e2-4f4b-46e3-a28f-61f904e65d4b-kube-api-access-s9zkm\") pod \"swift-proxy-74969bfb89-zx2cm\" (UID: \"033cb6e2-4f4b-46e3-a28f-61f904e65d4b\") " pod="openstack/swift-proxy-74969bfb89-zx2cm" Nov 22 10:58:04 crc kubenswrapper[4926]: I1122 10:58:04.261293 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/033cb6e2-4f4b-46e3-a28f-61f904e65d4b-config-data\") pod \"swift-proxy-74969bfb89-zx2cm\" (UID: \"033cb6e2-4f4b-46e3-a28f-61f904e65d4b\") " pod="openstack/swift-proxy-74969bfb89-zx2cm" Nov 22 10:58:04 crc kubenswrapper[4926]: I1122 10:58:04.354794 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-proxy-74969bfb89-zx2cm" Nov 22 10:58:04 crc kubenswrapper[4926]: I1122 10:58:04.358442 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstackclient"] Nov 22 10:58:04 crc kubenswrapper[4926]: I1122 10:58:04.363531 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstackclient" Nov 22 10:58:04 crc kubenswrapper[4926]: I1122 10:58:04.369359 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-config" Nov 22 10:58:04 crc kubenswrapper[4926]: I1122 10:58:04.369441 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstackclient-openstackclient-dockercfg-mkhz9" Nov 22 10:58:04 crc kubenswrapper[4926]: I1122 10:58:04.369672 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-config-secret" Nov 22 10:58:04 crc kubenswrapper[4926]: I1122 10:58:04.367878 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstackclient"] Nov 22 10:58:04 crc kubenswrapper[4926]: I1122 10:58:04.531403 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7k296\" (UniqueName: \"kubernetes.io/projected/0cbc861a-426d-4f3d-ac81-d61ae98eb45f-kube-api-access-7k296\") pod \"openstackclient\" (UID: \"0cbc861a-426d-4f3d-ac81-d61ae98eb45f\") " pod="openstack/openstackclient" Nov 22 10:58:04 crc kubenswrapper[4926]: I1122 10:58:04.531695 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/0cbc861a-426d-4f3d-ac81-d61ae98eb45f-openstack-config-secret\") pod \"openstackclient\" (UID: \"0cbc861a-426d-4f3d-ac81-d61ae98eb45f\") " pod="openstack/openstackclient" Nov 22 10:58:04 crc kubenswrapper[4926]: I1122 10:58:04.531748 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0cbc861a-426d-4f3d-ac81-d61ae98eb45f-combined-ca-bundle\") pod \"openstackclient\" (UID: \"0cbc861a-426d-4f3d-ac81-d61ae98eb45f\") " pod="openstack/openstackclient" Nov 22 10:58:04 crc kubenswrapper[4926]: I1122 10:58:04.531766 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/0cbc861a-426d-4f3d-ac81-d61ae98eb45f-openstack-config\") pod \"openstackclient\" (UID: 
\"0cbc861a-426d-4f3d-ac81-d61ae98eb45f\") " pod="openstack/openstackclient" Nov 22 10:58:04 crc kubenswrapper[4926]: I1122 10:58:04.566012 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 22 10:58:04 crc kubenswrapper[4926]: W1122 10:58:04.571973 4926 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode8c6c748_bba9_4298_b0de_745cd26ccec4.slice/crio-c46e52ca414d5491b96343be333ccab971b2df32bdec266766753144becf0c08 WatchSource:0}: Error finding container c46e52ca414d5491b96343be333ccab971b2df32bdec266766753144becf0c08: Status 404 returned error can't find the container with id c46e52ca414d5491b96343be333ccab971b2df32bdec266766753144becf0c08 Nov 22 10:58:04 crc kubenswrapper[4926]: I1122 10:58:04.603099 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="684951ad-f4cf-4fb2-83d6-5ce19fea1719" path="/var/lib/kubelet/pods/684951ad-f4cf-4fb2-83d6-5ce19fea1719/volumes" Nov 22 10:58:04 crc kubenswrapper[4926]: I1122 10:58:04.625268 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"e8c6c748-bba9-4298-b0de-745cd26ccec4","Type":"ContainerStarted","Data":"c46e52ca414d5491b96343be333ccab971b2df32bdec266766753144becf0c08"} Nov 22 10:58:04 crc kubenswrapper[4926]: I1122 10:58:04.631365 4926 generic.go:334] "Generic (PLEG): container finished" podID="b85c12f2-c589-476e-94b3-cc7650c154eb" containerID="cf7a06d1847cf43a40fb2690b3fc64105778392bc0f8287587552bfe07883185" exitCode=143 Nov 22 10:58:04 crc kubenswrapper[4926]: I1122 10:58:04.631448 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-867d4cfd66-ftqtw" event={"ID":"b85c12f2-c589-476e-94b3-cc7650c154eb","Type":"ContainerDied","Data":"cf7a06d1847cf43a40fb2690b3fc64105778392bc0f8287587552bfe07883185"} Nov 22 10:58:04 crc kubenswrapper[4926]: I1122 10:58:04.632850 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/0cbc861a-426d-4f3d-ac81-d61ae98eb45f-openstack-config-secret\") pod \"openstackclient\" (UID: \"0cbc861a-426d-4f3d-ac81-d61ae98eb45f\") " pod="openstack/openstackclient" Nov 22 10:58:04 crc kubenswrapper[4926]: I1122 10:58:04.632914 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0cbc861a-426d-4f3d-ac81-d61ae98eb45f-combined-ca-bundle\") pod \"openstackclient\" (UID: \"0cbc861a-426d-4f3d-ac81-d61ae98eb45f\") " pod="openstack/openstackclient" Nov 22 10:58:04 crc kubenswrapper[4926]: I1122 10:58:04.632934 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/0cbc861a-426d-4f3d-ac81-d61ae98eb45f-openstack-config\") pod \"openstackclient\" (UID: \"0cbc861a-426d-4f3d-ac81-d61ae98eb45f\") " pod="openstack/openstackclient" Nov 22 10:58:04 crc kubenswrapper[4926]: I1122 10:58:04.633019 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7k296\" (UniqueName: \"kubernetes.io/projected/0cbc861a-426d-4f3d-ac81-d61ae98eb45f-kube-api-access-7k296\") pod \"openstackclient\" (UID: \"0cbc861a-426d-4f3d-ac81-d61ae98eb45f\") " pod="openstack/openstackclient" Nov 22 10:58:04 crc kubenswrapper[4926]: I1122 10:58:04.637351 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config-secret\" 
(UniqueName: \"kubernetes.io/secret/0cbc861a-426d-4f3d-ac81-d61ae98eb45f-openstack-config-secret\") pod \"openstackclient\" (UID: \"0cbc861a-426d-4f3d-ac81-d61ae98eb45f\") " pod="openstack/openstackclient" Nov 22 10:58:04 crc kubenswrapper[4926]: I1122 10:58:04.637728 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/0cbc861a-426d-4f3d-ac81-d61ae98eb45f-openstack-config\") pod \"openstackclient\" (UID: \"0cbc861a-426d-4f3d-ac81-d61ae98eb45f\") " pod="openstack/openstackclient" Nov 22 10:58:04 crc kubenswrapper[4926]: I1122 10:58:04.637791 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0cbc861a-426d-4f3d-ac81-d61ae98eb45f-combined-ca-bundle\") pod \"openstackclient\" (UID: \"0cbc861a-426d-4f3d-ac81-d61ae98eb45f\") " pod="openstack/openstackclient" Nov 22 10:58:04 crc kubenswrapper[4926]: I1122 10:58:04.651398 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7k296\" (UniqueName: \"kubernetes.io/projected/0cbc861a-426d-4f3d-ac81-d61ae98eb45f-kube-api-access-7k296\") pod \"openstackclient\" (UID: \"0cbc861a-426d-4f3d-ac81-d61ae98eb45f\") " pod="openstack/openstackclient" Nov 22 10:58:04 crc kubenswrapper[4926]: I1122 10:58:04.696790 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/openstackclient"] Nov 22 10:58:04 crc kubenswrapper[4926]: I1122 10:58:04.697799 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstackclient" Nov 22 10:58:04 crc kubenswrapper[4926]: I1122 10:58:04.701571 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/openstackclient"] Nov 22 10:58:04 crc kubenswrapper[4926]: I1122 10:58:04.732679 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstackclient"] Nov 22 10:58:04 crc kubenswrapper[4926]: I1122 10:58:04.740063 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstackclient" Nov 22 10:58:04 crc kubenswrapper[4926]: I1122 10:58:04.756655 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstackclient"] Nov 22 10:58:04 crc kubenswrapper[4926]: I1122 10:58:04.942574 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/6c186926-85fd-4c52-9910-48a3c70ae9eb-openstack-config-secret\") pod \"openstackclient\" (UID: \"6c186926-85fd-4c52-9910-48a3c70ae9eb\") " pod="openstack/openstackclient" Nov 22 10:58:04 crc kubenswrapper[4926]: I1122 10:58:04.942643 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/6c186926-85fd-4c52-9910-48a3c70ae9eb-openstack-config\") pod \"openstackclient\" (UID: \"6c186926-85fd-4c52-9910-48a3c70ae9eb\") " pod="openstack/openstackclient" Nov 22 10:58:04 crc kubenswrapper[4926]: I1122 10:58:04.942691 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-j7jpf\" (UniqueName: \"kubernetes.io/projected/6c186926-85fd-4c52-9910-48a3c70ae9eb-kube-api-access-j7jpf\") pod \"openstackclient\" (UID: \"6c186926-85fd-4c52-9910-48a3c70ae9eb\") " pod="openstack/openstackclient" Nov 22 10:58:04 crc kubenswrapper[4926]: I1122 10:58:04.942768 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6c186926-85fd-4c52-9910-48a3c70ae9eb-combined-ca-bundle\") pod \"openstackclient\" (UID: \"6c186926-85fd-4c52-9910-48a3c70ae9eb\") " pod="openstack/openstackclient" Nov 22 10:58:04 crc kubenswrapper[4926]: E1122 10:58:04.981280 4926 log.go:32] "RunPodSandbox from runtime service failed" err=< Nov 22 10:58:04 crc kubenswrapper[4926]: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_openstackclient_openstack_0cbc861a-426d-4f3d-ac81-d61ae98eb45f_0(0572af366c5e3d31747504ea4c7b13747a67eece9e9ba91f49b605bb342f94f5): error adding pod openstack_openstackclient to CNI network "multus-cni-network": plugin type="multus-shim" name="multus-cni-network" failed (add): CmdAdd (shim): CNI request failed with status 400: 'ContainerID:"0572af366c5e3d31747504ea4c7b13747a67eece9e9ba91f49b605bb342f94f5" Netns:"/var/run/netns/81e30544-1121-483b-bc51-2b934ccfbf5a" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openstack;K8S_POD_NAME=openstackclient;K8S_POD_INFRA_CONTAINER_ID=0572af366c5e3d31747504ea4c7b13747a67eece9e9ba91f49b605bb342f94f5;K8S_POD_UID=0cbc861a-426d-4f3d-ac81-d61ae98eb45f" Path:"" ERRORED: error configuring pod [openstack/openstackclient] networking: Multus: [openstack/openstackclient/0cbc861a-426d-4f3d-ac81-d61ae98eb45f]: expected pod UID "0cbc861a-426d-4f3d-ac81-d61ae98eb45f" but got "6c186926-85fd-4c52-9910-48a3c70ae9eb" from Kube API Nov 22 10:58:04 crc kubenswrapper[4926]: ': StdinData: {"binDir":"/var/lib/cni/bin","clusterNetwork":"/host/run/multus/cni/net.d/10-ovn-kubernetes.conf","cniVersion":"0.3.1","daemonSocketDir":"/run/multus/socket","globalNamespaces":"default,openshift-multus,openshift-sriov-network-operator,openshift-cnv","logLevel":"verbose","logToStderr":true,"name":"multus-cni-network","namespaceIsolation":true,"type":"multus-shim"} Nov 22 10:58:04 crc kubenswrapper[4926]: > Nov 22 10:58:04 crc kubenswrapper[4926]: E1122 10:58:04.981375 4926 kuberuntime_sandbox.go:72] 
"Failed to create sandbox for pod" err=< Nov 22 10:58:04 crc kubenswrapper[4926]: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_openstackclient_openstack_0cbc861a-426d-4f3d-ac81-d61ae98eb45f_0(0572af366c5e3d31747504ea4c7b13747a67eece9e9ba91f49b605bb342f94f5): error adding pod openstack_openstackclient to CNI network "multus-cni-network": plugin type="multus-shim" name="multus-cni-network" failed (add): CmdAdd (shim): CNI request failed with status 400: 'ContainerID:"0572af366c5e3d31747504ea4c7b13747a67eece9e9ba91f49b605bb342f94f5" Netns:"/var/run/netns/81e30544-1121-483b-bc51-2b934ccfbf5a" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openstack;K8S_POD_NAME=openstackclient;K8S_POD_INFRA_CONTAINER_ID=0572af366c5e3d31747504ea4c7b13747a67eece9e9ba91f49b605bb342f94f5;K8S_POD_UID=0cbc861a-426d-4f3d-ac81-d61ae98eb45f" Path:"" ERRORED: error configuring pod [openstack/openstackclient] networking: Multus: [openstack/openstackclient/0cbc861a-426d-4f3d-ac81-d61ae98eb45f]: expected pod UID "0cbc861a-426d-4f3d-ac81-d61ae98eb45f" but got "6c186926-85fd-4c52-9910-48a3c70ae9eb" from Kube API Nov 22 10:58:04 crc kubenswrapper[4926]: ': StdinData: {"binDir":"/var/lib/cni/bin","clusterNetwork":"/host/run/multus/cni/net.d/10-ovn-kubernetes.conf","cniVersion":"0.3.1","daemonSocketDir":"/run/multus/socket","globalNamespaces":"default,openshift-multus,openshift-sriov-network-operator,openshift-cnv","logLevel":"verbose","logToStderr":true,"name":"multus-cni-network","namespaceIsolation":true,"type":"multus-shim"} Nov 22 10:58:04 crc kubenswrapper[4926]: > pod="openstack/openstackclient" Nov 22 10:58:05 crc kubenswrapper[4926]: I1122 10:58:05.047863 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/6c186926-85fd-4c52-9910-48a3c70ae9eb-openstack-config\") pod \"openstackclient\" (UID: \"6c186926-85fd-4c52-9910-48a3c70ae9eb\") " pod="openstack/openstackclient" Nov 22 10:58:05 crc kubenswrapper[4926]: I1122 10:58:05.048030 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-j7jpf\" (UniqueName: \"kubernetes.io/projected/6c186926-85fd-4c52-9910-48a3c70ae9eb-kube-api-access-j7jpf\") pod \"openstackclient\" (UID: \"6c186926-85fd-4c52-9910-48a3c70ae9eb\") " pod="openstack/openstackclient" Nov 22 10:58:05 crc kubenswrapper[4926]: I1122 10:58:05.048245 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6c186926-85fd-4c52-9910-48a3c70ae9eb-combined-ca-bundle\") pod \"openstackclient\" (UID: \"6c186926-85fd-4c52-9910-48a3c70ae9eb\") " pod="openstack/openstackclient" Nov 22 10:58:05 crc kubenswrapper[4926]: I1122 10:58:05.048501 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/6c186926-85fd-4c52-9910-48a3c70ae9eb-openstack-config-secret\") pod \"openstackclient\" (UID: \"6c186926-85fd-4c52-9910-48a3c70ae9eb\") " pod="openstack/openstackclient" Nov 22 10:58:05 crc kubenswrapper[4926]: I1122 10:58:05.051105 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/6c186926-85fd-4c52-9910-48a3c70ae9eb-openstack-config\") pod \"openstackclient\" (UID: \"6c186926-85fd-4c52-9910-48a3c70ae9eb\") " pod="openstack/openstackclient" Nov 22 10:58:05 crc kubenswrapper[4926]: I1122 10:58:05.071820 4926 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-j7jpf\" (UniqueName: \"kubernetes.io/projected/6c186926-85fd-4c52-9910-48a3c70ae9eb-kube-api-access-j7jpf\") pod \"openstackclient\" (UID: \"6c186926-85fd-4c52-9910-48a3c70ae9eb\") " pod="openstack/openstackclient" Nov 22 10:58:05 crc kubenswrapper[4926]: I1122 10:58:05.076501 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/6c186926-85fd-4c52-9910-48a3c70ae9eb-openstack-config-secret\") pod \"openstackclient\" (UID: \"6c186926-85fd-4c52-9910-48a3c70ae9eb\") " pod="openstack/openstackclient" Nov 22 10:58:05 crc kubenswrapper[4926]: I1122 10:58:05.088771 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-proxy-74969bfb89-zx2cm"] Nov 22 10:58:05 crc kubenswrapper[4926]: I1122 10:58:05.097269 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6c186926-85fd-4c52-9910-48a3c70ae9eb-combined-ca-bundle\") pod \"openstackclient\" (UID: \"6c186926-85fd-4c52-9910-48a3c70ae9eb\") " pod="openstack/openstackclient" Nov 22 10:58:05 crc kubenswrapper[4926]: I1122 10:58:05.155361 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstackclient" Nov 22 10:58:05 crc kubenswrapper[4926]: I1122 10:58:05.276684 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 22 10:58:05 crc kubenswrapper[4926]: I1122 10:58:05.277049 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="8d021220-ab5c-4d9c-9b7a-dd2248121353" containerName="ceilometer-central-agent" containerID="cri-o://e057c2c43791991e9165d6cf61d6acbfd66ba63c5966a16d84e7c0c781bef430" gracePeriod=30 Nov 22 10:58:05 crc kubenswrapper[4926]: I1122 10:58:05.277902 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="8d021220-ab5c-4d9c-9b7a-dd2248121353" containerName="sg-core" containerID="cri-o://2bf0f11d91c21ac1c621718d3069598344fe2d89df6009d0fcf1fdc951f587cb" gracePeriod=30 Nov 22 10:58:05 crc kubenswrapper[4926]: I1122 10:58:05.277979 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="8d021220-ab5c-4d9c-9b7a-dd2248121353" containerName="proxy-httpd" containerID="cri-o://f12c7d0fbc318c22fd3030bae7cb7292e6b7fe85d2883d8289ed56c195aa7aff" gracePeriod=30 Nov 22 10:58:05 crc kubenswrapper[4926]: I1122 10:58:05.278047 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="8d021220-ab5c-4d9c-9b7a-dd2248121353" containerName="ceilometer-notification-agent" containerID="cri-o://a4302a4e6816e40c9a574135c82c609a1db7db9aa1e6698b6215ce43dfb3893d" gracePeriod=30 Nov 22 10:58:05 crc kubenswrapper[4926]: I1122 10:58:05.287023 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ceilometer-0" Nov 22 10:58:05 crc kubenswrapper[4926]: I1122 10:58:05.668354 4926 generic.go:334] "Generic (PLEG): container finished" podID="8d021220-ab5c-4d9c-9b7a-dd2248121353" containerID="f12c7d0fbc318c22fd3030bae7cb7292e6b7fe85d2883d8289ed56c195aa7aff" exitCode=0 Nov 22 10:58:05 crc kubenswrapper[4926]: I1122 10:58:05.668762 4926 generic.go:334] "Generic (PLEG): container finished" podID="8d021220-ab5c-4d9c-9b7a-dd2248121353" 
containerID="2bf0f11d91c21ac1c621718d3069598344fe2d89df6009d0fcf1fdc951f587cb" exitCode=2 Nov 22 10:58:05 crc kubenswrapper[4926]: I1122 10:58:05.668903 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"8d021220-ab5c-4d9c-9b7a-dd2248121353","Type":"ContainerDied","Data":"f12c7d0fbc318c22fd3030bae7cb7292e6b7fe85d2883d8289ed56c195aa7aff"} Nov 22 10:58:05 crc kubenswrapper[4926]: I1122 10:58:05.669241 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"8d021220-ab5c-4d9c-9b7a-dd2248121353","Type":"ContainerDied","Data":"2bf0f11d91c21ac1c621718d3069598344fe2d89df6009d0fcf1fdc951f587cb"} Nov 22 10:58:05 crc kubenswrapper[4926]: I1122 10:58:05.677315 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"e8c6c748-bba9-4298-b0de-745cd26ccec4","Type":"ContainerStarted","Data":"222361a1e7698021f5d5de66bbc4deaefd256ee2e6625f4c2db9220c86334cb7"} Nov 22 10:58:05 crc kubenswrapper[4926]: I1122 10:58:05.689457 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstackclient" Nov 22 10:58:05 crc kubenswrapper[4926]: I1122 10:58:05.690008 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-74969bfb89-zx2cm" event={"ID":"033cb6e2-4f4b-46e3-a28f-61f904e65d4b","Type":"ContainerStarted","Data":"62439dd18a927b1bbe2f32b0ee55329964e69436173525f27c6520f363a8498d"} Nov 22 10:58:05 crc kubenswrapper[4926]: I1122 10:58:05.690035 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-74969bfb89-zx2cm" event={"ID":"033cb6e2-4f4b-46e3-a28f-61f904e65d4b","Type":"ContainerStarted","Data":"53473ee1ac79c81e0ce2866400a6a86b68d1dd22bc9345ea5c5b5f0d07cfa9a5"} Nov 22 10:58:05 crc kubenswrapper[4926]: I1122 10:58:05.693377 4926 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openstack/openstackclient" oldPodUID="0cbc861a-426d-4f3d-ac81-d61ae98eb45f" podUID="6c186926-85fd-4c52-9910-48a3c70ae9eb" Nov 22 10:58:05 crc kubenswrapper[4926]: I1122 10:58:05.709040 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstackclient" Nov 22 10:58:05 crc kubenswrapper[4926]: I1122 10:58:05.731373 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstackclient"] Nov 22 10:58:05 crc kubenswrapper[4926]: I1122 10:58:05.874212 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/0cbc861a-426d-4f3d-ac81-d61ae98eb45f-openstack-config-secret\") pod \"0cbc861a-426d-4f3d-ac81-d61ae98eb45f\" (UID: \"0cbc861a-426d-4f3d-ac81-d61ae98eb45f\") " Nov 22 10:58:05 crc kubenswrapper[4926]: I1122 10:58:05.874272 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/0cbc861a-426d-4f3d-ac81-d61ae98eb45f-openstack-config\") pod \"0cbc861a-426d-4f3d-ac81-d61ae98eb45f\" (UID: \"0cbc861a-426d-4f3d-ac81-d61ae98eb45f\") " Nov 22 10:58:05 crc kubenswrapper[4926]: I1122 10:58:05.874370 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0cbc861a-426d-4f3d-ac81-d61ae98eb45f-combined-ca-bundle\") pod \"0cbc861a-426d-4f3d-ac81-d61ae98eb45f\" (UID: \"0cbc861a-426d-4f3d-ac81-d61ae98eb45f\") " Nov 22 10:58:05 crc kubenswrapper[4926]: I1122 10:58:05.874395 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7k296\" (UniqueName: \"kubernetes.io/projected/0cbc861a-426d-4f3d-ac81-d61ae98eb45f-kube-api-access-7k296\") pod \"0cbc861a-426d-4f3d-ac81-d61ae98eb45f\" (UID: \"0cbc861a-426d-4f3d-ac81-d61ae98eb45f\") " Nov 22 10:58:05 crc kubenswrapper[4926]: I1122 10:58:05.875794 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0cbc861a-426d-4f3d-ac81-d61ae98eb45f-openstack-config" (OuterVolumeSpecName: "openstack-config") pod "0cbc861a-426d-4f3d-ac81-d61ae98eb45f" (UID: "0cbc861a-426d-4f3d-ac81-d61ae98eb45f"). InnerVolumeSpecName "openstack-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:58:05 crc kubenswrapper[4926]: I1122 10:58:05.880896 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0cbc861a-426d-4f3d-ac81-d61ae98eb45f-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "0cbc861a-426d-4f3d-ac81-d61ae98eb45f" (UID: "0cbc861a-426d-4f3d-ac81-d61ae98eb45f"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:58:05 crc kubenswrapper[4926]: I1122 10:58:05.886398 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0cbc861a-426d-4f3d-ac81-d61ae98eb45f-kube-api-access-7k296" (OuterVolumeSpecName: "kube-api-access-7k296") pod "0cbc861a-426d-4f3d-ac81-d61ae98eb45f" (UID: "0cbc861a-426d-4f3d-ac81-d61ae98eb45f"). InnerVolumeSpecName "kube-api-access-7k296". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:58:05 crc kubenswrapper[4926]: I1122 10:58:05.893903 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0cbc861a-426d-4f3d-ac81-d61ae98eb45f-openstack-config-secret" (OuterVolumeSpecName: "openstack-config-secret") pod "0cbc861a-426d-4f3d-ac81-d61ae98eb45f" (UID: "0cbc861a-426d-4f3d-ac81-d61ae98eb45f"). InnerVolumeSpecName "openstack-config-secret". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:58:05 crc kubenswrapper[4926]: I1122 10:58:05.976977 4926 reconciler_common.go:293] "Volume detached for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/0cbc861a-426d-4f3d-ac81-d61ae98eb45f-openstack-config-secret\") on node \"crc\" DevicePath \"\"" Nov 22 10:58:05 crc kubenswrapper[4926]: I1122 10:58:05.977015 4926 reconciler_common.go:293] "Volume detached for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/0cbc861a-426d-4f3d-ac81-d61ae98eb45f-openstack-config\") on node \"crc\" DevicePath \"\"" Nov 22 10:58:05 crc kubenswrapper[4926]: I1122 10:58:05.977027 4926 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0cbc861a-426d-4f3d-ac81-d61ae98eb45f-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 22 10:58:05 crc kubenswrapper[4926]: I1122 10:58:05.977037 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7k296\" (UniqueName: \"kubernetes.io/projected/0cbc861a-426d-4f3d-ac81-d61ae98eb45f-kube-api-access-7k296\") on node \"crc\" DevicePath \"\"" Nov 22 10:58:06 crc kubenswrapper[4926]: I1122 10:58:06.595864 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0cbc861a-426d-4f3d-ac81-d61ae98eb45f" path="/var/lib/kubelet/pods/0cbc861a-426d-4f3d-ac81-d61ae98eb45f/volumes" Nov 22 10:58:06 crc kubenswrapper[4926]: I1122 10:58:06.699464 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstackclient" event={"ID":"6c186926-85fd-4c52-9910-48a3c70ae9eb","Type":"ContainerStarted","Data":"320a695f2ee681135bb30d99d0e91bdd20d6d7c3b38c81dcbfe84af92d016dd2"} Nov 22 10:58:06 crc kubenswrapper[4926]: I1122 10:58:06.706078 4926 generic.go:334] "Generic (PLEG): container finished" podID="8d021220-ab5c-4d9c-9b7a-dd2248121353" containerID="e057c2c43791991e9165d6cf61d6acbfd66ba63c5966a16d84e7c0c781bef430" exitCode=0 Nov 22 10:58:06 crc kubenswrapper[4926]: I1122 10:58:06.706176 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"8d021220-ab5c-4d9c-9b7a-dd2248121353","Type":"ContainerDied","Data":"e057c2c43791991e9165d6cf61d6acbfd66ba63c5966a16d84e7c0c781bef430"} Nov 22 10:58:06 crc kubenswrapper[4926]: I1122 10:58:06.709920 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"e8c6c748-bba9-4298-b0de-745cd26ccec4","Type":"ContainerStarted","Data":"1d68fa6de4696faa196906ddd9543345421f04600bea9ad70cffee6cb74d4a17"} Nov 22 10:58:06 crc kubenswrapper[4926]: I1122 10:58:06.715264 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstackclient" Nov 22 10:58:06 crc kubenswrapper[4926]: I1122 10:58:06.716809 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-74969bfb89-zx2cm" event={"ID":"033cb6e2-4f4b-46e3-a28f-61f904e65d4b","Type":"ContainerStarted","Data":"80261f7e45fa4def0b066354560eb6f90cff024e91cf6233dcb3541dc271317f"} Nov 22 10:58:06 crc kubenswrapper[4926]: I1122 10:58:06.716840 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/swift-proxy-74969bfb89-zx2cm" Nov 22 10:58:06 crc kubenswrapper[4926]: I1122 10:58:06.716906 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/swift-proxy-74969bfb89-zx2cm" Nov 22 10:58:06 crc kubenswrapper[4926]: I1122 10:58:06.731460 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-scheduler-0" podStartSLOduration=3.731437716 podStartE2EDuration="3.731437716s" podCreationTimestamp="2025-11-22 10:58:03 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 10:58:06.728484051 +0000 UTC m=+1107.030089338" watchObservedRunningTime="2025-11-22 10:58:06.731437716 +0000 UTC m=+1107.033043003" Nov 22 10:58:06 crc kubenswrapper[4926]: I1122 10:58:06.750133 4926 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openstack/openstackclient" oldPodUID="0cbc861a-426d-4f3d-ac81-d61ae98eb45f" podUID="6c186926-85fd-4c52-9910-48a3c70ae9eb" Nov 22 10:58:06 crc kubenswrapper[4926]: I1122 10:58:06.755968 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/swift-proxy-74969bfb89-zx2cm" podStartSLOduration=2.755943596 podStartE2EDuration="2.755943596s" podCreationTimestamp="2025-11-22 10:58:04 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 10:58:06.743224542 +0000 UTC m=+1107.044829819" watchObservedRunningTime="2025-11-22 10:58:06.755943596 +0000 UTC m=+1107.057548883" Nov 22 10:58:07 crc kubenswrapper[4926]: I1122 10:58:07.016077 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-867d4cfd66-ftqtw" podUID="b85c12f2-c589-476e-94b3-cc7650c154eb" containerName="barbican-api" probeResult="failure" output="Get \"http://10.217.0.163:9311/healthcheck\": read tcp 10.217.0.2:45330->10.217.0.163:9311: read: connection reset by peer" Nov 22 10:58:07 crc kubenswrapper[4926]: I1122 10:58:07.016708 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-867d4cfd66-ftqtw" podUID="b85c12f2-c589-476e-94b3-cc7650c154eb" containerName="barbican-api-log" probeResult="failure" output="Get \"http://10.217.0.163:9311/healthcheck\": read tcp 10.217.0.2:45340->10.217.0.163:9311: read: connection reset by peer" Nov 22 10:58:07 crc kubenswrapper[4926]: I1122 10:58:07.030809 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/cinder-api-0" Nov 22 10:58:07 crc kubenswrapper[4926]: I1122 10:58:07.763091 4926 generic.go:334] "Generic (PLEG): container finished" podID="b85c12f2-c589-476e-94b3-cc7650c154eb" containerID="7d58570f8b7f83376ced816694bf451d8f04d6248b463f21a024cc08d83b574e" exitCode=0 Nov 22 10:58:07 crc kubenswrapper[4926]: I1122 10:58:07.763345 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-867d4cfd66-ftqtw" 
event={"ID":"b85c12f2-c589-476e-94b3-cc7650c154eb","Type":"ContainerDied","Data":"7d58570f8b7f83376ced816694bf451d8f04d6248b463f21a024cc08d83b574e"} Nov 22 10:58:07 crc kubenswrapper[4926]: I1122 10:58:07.771507 4926 generic.go:334] "Generic (PLEG): container finished" podID="dc8c34a7-9a8c-4ae9-afb4-eebee493f602" containerID="9f6e38673b72f96eacdc348f5ec4654dfb34a31ecfafba67822abe86d2cad186" exitCode=0 Nov 22 10:58:07 crc kubenswrapper[4926]: I1122 10:58:07.773135 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-64c985686b-kqzqh" event={"ID":"dc8c34a7-9a8c-4ae9-afb4-eebee493f602","Type":"ContainerDied","Data":"9f6e38673b72f96eacdc348f5ec4654dfb34a31ecfafba67822abe86d2cad186"} Nov 22 10:58:07 crc kubenswrapper[4926]: I1122 10:58:07.982377 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-867d4cfd66-ftqtw" Nov 22 10:58:08 crc kubenswrapper[4926]: I1122 10:58:08.075272 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/horizon-67f69cf99d-5jsdr" podUID="0566b619-da0e-49ff-b282-3d2bb8ae4fe6" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.149:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.149:8443: connect: connection refused" Nov 22 10:58:08 crc kubenswrapper[4926]: I1122 10:58:08.135486 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b85c12f2-c589-476e-94b3-cc7650c154eb-logs\") pod \"b85c12f2-c589-476e-94b3-cc7650c154eb\" (UID: \"b85c12f2-c589-476e-94b3-cc7650c154eb\") " Nov 22 10:58:08 crc kubenswrapper[4926]: I1122 10:58:08.135586 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x52rt\" (UniqueName: \"kubernetes.io/projected/b85c12f2-c589-476e-94b3-cc7650c154eb-kube-api-access-x52rt\") pod \"b85c12f2-c589-476e-94b3-cc7650c154eb\" (UID: \"b85c12f2-c589-476e-94b3-cc7650c154eb\") " Nov 22 10:58:08 crc kubenswrapper[4926]: I1122 10:58:08.135813 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/b85c12f2-c589-476e-94b3-cc7650c154eb-config-data-custom\") pod \"b85c12f2-c589-476e-94b3-cc7650c154eb\" (UID: \"b85c12f2-c589-476e-94b3-cc7650c154eb\") " Nov 22 10:58:08 crc kubenswrapper[4926]: I1122 10:58:08.135846 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b85c12f2-c589-476e-94b3-cc7650c154eb-combined-ca-bundle\") pod \"b85c12f2-c589-476e-94b3-cc7650c154eb\" (UID: \"b85c12f2-c589-476e-94b3-cc7650c154eb\") " Nov 22 10:58:08 crc kubenswrapper[4926]: I1122 10:58:08.135917 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b85c12f2-c589-476e-94b3-cc7650c154eb-config-data\") pod \"b85c12f2-c589-476e-94b3-cc7650c154eb\" (UID: \"b85c12f2-c589-476e-94b3-cc7650c154eb\") " Nov 22 10:58:08 crc kubenswrapper[4926]: I1122 10:58:08.137700 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b85c12f2-c589-476e-94b3-cc7650c154eb-logs" (OuterVolumeSpecName: "logs") pod "b85c12f2-c589-476e-94b3-cc7650c154eb" (UID: "b85c12f2-c589-476e-94b3-cc7650c154eb"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 10:58:08 crc kubenswrapper[4926]: I1122 10:58:08.154155 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b85c12f2-c589-476e-94b3-cc7650c154eb-kube-api-access-x52rt" (OuterVolumeSpecName: "kube-api-access-x52rt") pod "b85c12f2-c589-476e-94b3-cc7650c154eb" (UID: "b85c12f2-c589-476e-94b3-cc7650c154eb"). InnerVolumeSpecName "kube-api-access-x52rt". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:58:08 crc kubenswrapper[4926]: I1122 10:58:08.154290 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b85c12f2-c589-476e-94b3-cc7650c154eb-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "b85c12f2-c589-476e-94b3-cc7650c154eb" (UID: "b85c12f2-c589-476e-94b3-cc7650c154eb"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:58:08 crc kubenswrapper[4926]: I1122 10:58:08.194858 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b85c12f2-c589-476e-94b3-cc7650c154eb-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "b85c12f2-c589-476e-94b3-cc7650c154eb" (UID: "b85c12f2-c589-476e-94b3-cc7650c154eb"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:58:08 crc kubenswrapper[4926]: I1122 10:58:08.203549 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b85c12f2-c589-476e-94b3-cc7650c154eb-config-data" (OuterVolumeSpecName: "config-data") pod "b85c12f2-c589-476e-94b3-cc7650c154eb" (UID: "b85c12f2-c589-476e-94b3-cc7650c154eb"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:58:08 crc kubenswrapper[4926]: I1122 10:58:08.237625 4926 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b85c12f2-c589-476e-94b3-cc7650c154eb-logs\") on node \"crc\" DevicePath \"\"" Nov 22 10:58:08 crc kubenswrapper[4926]: I1122 10:58:08.237668 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x52rt\" (UniqueName: \"kubernetes.io/projected/b85c12f2-c589-476e-94b3-cc7650c154eb-kube-api-access-x52rt\") on node \"crc\" DevicePath \"\"" Nov 22 10:58:08 crc kubenswrapper[4926]: I1122 10:58:08.237681 4926 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/b85c12f2-c589-476e-94b3-cc7650c154eb-config-data-custom\") on node \"crc\" DevicePath \"\"" Nov 22 10:58:08 crc kubenswrapper[4926]: I1122 10:58:08.237690 4926 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b85c12f2-c589-476e-94b3-cc7650c154eb-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 22 10:58:08 crc kubenswrapper[4926]: I1122 10:58:08.237699 4926 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b85c12f2-c589-476e-94b3-cc7650c154eb-config-data\") on node \"crc\" DevicePath \"\"" Nov 22 10:58:08 crc kubenswrapper[4926]: I1122 10:58:08.263698 4926 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-64c985686b-kqzqh" Nov 22 10:58:08 crc kubenswrapper[4926]: I1122 10:58:08.440305 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5drgf\" (UniqueName: \"kubernetes.io/projected/dc8c34a7-9a8c-4ae9-afb4-eebee493f602-kube-api-access-5drgf\") pod \"dc8c34a7-9a8c-4ae9-afb4-eebee493f602\" (UID: \"dc8c34a7-9a8c-4ae9-afb4-eebee493f602\") " Nov 22 10:58:08 crc kubenswrapper[4926]: I1122 10:58:08.440477 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dc8c34a7-9a8c-4ae9-afb4-eebee493f602-combined-ca-bundle\") pod \"dc8c34a7-9a8c-4ae9-afb4-eebee493f602\" (UID: \"dc8c34a7-9a8c-4ae9-afb4-eebee493f602\") " Nov 22 10:58:08 crc kubenswrapper[4926]: I1122 10:58:08.440549 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/dc8c34a7-9a8c-4ae9-afb4-eebee493f602-ovndb-tls-certs\") pod \"dc8c34a7-9a8c-4ae9-afb4-eebee493f602\" (UID: \"dc8c34a7-9a8c-4ae9-afb4-eebee493f602\") " Nov 22 10:58:08 crc kubenswrapper[4926]: I1122 10:58:08.440567 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/dc8c34a7-9a8c-4ae9-afb4-eebee493f602-httpd-config\") pod \"dc8c34a7-9a8c-4ae9-afb4-eebee493f602\" (UID: \"dc8c34a7-9a8c-4ae9-afb4-eebee493f602\") " Nov 22 10:58:08 crc kubenswrapper[4926]: I1122 10:58:08.440656 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/dc8c34a7-9a8c-4ae9-afb4-eebee493f602-config\") pod \"dc8c34a7-9a8c-4ae9-afb4-eebee493f602\" (UID: \"dc8c34a7-9a8c-4ae9-afb4-eebee493f602\") " Nov 22 10:58:08 crc kubenswrapper[4926]: I1122 10:58:08.444860 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/dc8c34a7-9a8c-4ae9-afb4-eebee493f602-kube-api-access-5drgf" (OuterVolumeSpecName: "kube-api-access-5drgf") pod "dc8c34a7-9a8c-4ae9-afb4-eebee493f602" (UID: "dc8c34a7-9a8c-4ae9-afb4-eebee493f602"). InnerVolumeSpecName "kube-api-access-5drgf". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:58:08 crc kubenswrapper[4926]: I1122 10:58:08.447560 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/dc8c34a7-9a8c-4ae9-afb4-eebee493f602-httpd-config" (OuterVolumeSpecName: "httpd-config") pod "dc8c34a7-9a8c-4ae9-afb4-eebee493f602" (UID: "dc8c34a7-9a8c-4ae9-afb4-eebee493f602"). InnerVolumeSpecName "httpd-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:58:08 crc kubenswrapper[4926]: I1122 10:58:08.512789 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/dc8c34a7-9a8c-4ae9-afb4-eebee493f602-config" (OuterVolumeSpecName: "config") pod "dc8c34a7-9a8c-4ae9-afb4-eebee493f602" (UID: "dc8c34a7-9a8c-4ae9-afb4-eebee493f602"). InnerVolumeSpecName "config". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:58:08 crc kubenswrapper[4926]: I1122 10:58:08.517964 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/dc8c34a7-9a8c-4ae9-afb4-eebee493f602-ovndb-tls-certs" (OuterVolumeSpecName: "ovndb-tls-certs") pod "dc8c34a7-9a8c-4ae9-afb4-eebee493f602" (UID: "dc8c34a7-9a8c-4ae9-afb4-eebee493f602"). InnerVolumeSpecName "ovndb-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:58:08 crc kubenswrapper[4926]: I1122 10:58:08.518024 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/dc8c34a7-9a8c-4ae9-afb4-eebee493f602-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "dc8c34a7-9a8c-4ae9-afb4-eebee493f602" (UID: "dc8c34a7-9a8c-4ae9-afb4-eebee493f602"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:58:08 crc kubenswrapper[4926]: I1122 10:58:08.543293 4926 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dc8c34a7-9a8c-4ae9-afb4-eebee493f602-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 22 10:58:08 crc kubenswrapper[4926]: I1122 10:58:08.543340 4926 reconciler_common.go:293] "Volume detached for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/dc8c34a7-9a8c-4ae9-afb4-eebee493f602-ovndb-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 22 10:58:08 crc kubenswrapper[4926]: I1122 10:58:08.543355 4926 reconciler_common.go:293] "Volume detached for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/dc8c34a7-9a8c-4ae9-afb4-eebee493f602-httpd-config\") on node \"crc\" DevicePath \"\"" Nov 22 10:58:08 crc kubenswrapper[4926]: I1122 10:58:08.543368 4926 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/secret/dc8c34a7-9a8c-4ae9-afb4-eebee493f602-config\") on node \"crc\" DevicePath \"\"" Nov 22 10:58:08 crc kubenswrapper[4926]: I1122 10:58:08.543380 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5drgf\" (UniqueName: \"kubernetes.io/projected/dc8c34a7-9a8c-4ae9-afb4-eebee493f602-kube-api-access-5drgf\") on node \"crc\" DevicePath \"\"" Nov 22 10:58:08 crc kubenswrapper[4926]: I1122 10:58:08.783274 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-867d4cfd66-ftqtw" event={"ID":"b85c12f2-c589-476e-94b3-cc7650c154eb","Type":"ContainerDied","Data":"5bd833ed1e1e403f85abc3fc3e3f6ed58aa5288135bfdfc5f0a129c53111f3ad"} Nov 22 10:58:08 crc kubenswrapper[4926]: I1122 10:58:08.783308 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-867d4cfd66-ftqtw" Nov 22 10:58:08 crc kubenswrapper[4926]: I1122 10:58:08.783605 4926 scope.go:117] "RemoveContainer" containerID="7d58570f8b7f83376ced816694bf451d8f04d6248b463f21a024cc08d83b574e" Nov 22 10:58:08 crc kubenswrapper[4926]: I1122 10:58:08.789908 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-64c985686b-kqzqh" event={"ID":"dc8c34a7-9a8c-4ae9-afb4-eebee493f602","Type":"ContainerDied","Data":"b610cf4d2c20cc88a9b0d4c57fadd7eb904155b32a0fbdf1fb2004005d1ceb7b"} Nov 22 10:58:08 crc kubenswrapper[4926]: I1122 10:58:08.789957 4926 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-64c985686b-kqzqh" Nov 22 10:58:08 crc kubenswrapper[4926]: I1122 10:58:08.805923 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-api-867d4cfd66-ftqtw"] Nov 22 10:58:08 crc kubenswrapper[4926]: I1122 10:58:08.824147 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-api-867d4cfd66-ftqtw"] Nov 22 10:58:08 crc kubenswrapper[4926]: I1122 10:58:08.839741 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-64c985686b-kqzqh"] Nov 22 10:58:08 crc kubenswrapper[4926]: I1122 10:58:08.845318 4926 scope.go:117] "RemoveContainer" containerID="cf7a06d1847cf43a40fb2690b3fc64105778392bc0f8287587552bfe07883185" Nov 22 10:58:08 crc kubenswrapper[4926]: I1122 10:58:08.848159 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-64c985686b-kqzqh"] Nov 22 10:58:08 crc kubenswrapper[4926]: I1122 10:58:08.868045 4926 scope.go:117] "RemoveContainer" containerID="ab491699e4c12708fd67dba8ac5c6be86603d3691c9798faec0c28312c58cca5" Nov 22 10:58:08 crc kubenswrapper[4926]: I1122 10:58:08.889365 4926 scope.go:117] "RemoveContainer" containerID="9f6e38673b72f96eacdc348f5ec4654dfb34a31ecfafba67822abe86d2cad186" Nov 22 10:58:08 crc kubenswrapper[4926]: I1122 10:58:08.995155 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/cinder-scheduler-0" Nov 22 10:58:09 crc kubenswrapper[4926]: I1122 10:58:09.800876 4926 generic.go:334] "Generic (PLEG): container finished" podID="8d021220-ab5c-4d9c-9b7a-dd2248121353" containerID="a4302a4e6816e40c9a574135c82c609a1db7db9aa1e6698b6215ce43dfb3893d" exitCode=0 Nov 22 10:58:09 crc kubenswrapper[4926]: I1122 10:58:09.800916 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"8d021220-ab5c-4d9c-9b7a-dd2248121353","Type":"ContainerDied","Data":"a4302a4e6816e40c9a574135c82c609a1db7db9aa1e6698b6215ce43dfb3893d"} Nov 22 10:58:09 crc kubenswrapper[4926]: I1122 10:58:09.876609 4926 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 22 10:58:09 crc kubenswrapper[4926]: I1122 10:58:09.970689 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/8d021220-ab5c-4d9c-9b7a-dd2248121353-sg-core-conf-yaml\") pod \"8d021220-ab5c-4d9c-9b7a-dd2248121353\" (UID: \"8d021220-ab5c-4d9c-9b7a-dd2248121353\") " Nov 22 10:58:09 crc kubenswrapper[4926]: I1122 10:58:09.971037 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8d021220-ab5c-4d9c-9b7a-dd2248121353-config-data\") pod \"8d021220-ab5c-4d9c-9b7a-dd2248121353\" (UID: \"8d021220-ab5c-4d9c-9b7a-dd2248121353\") " Nov 22 10:58:09 crc kubenswrapper[4926]: I1122 10:58:09.971479 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-smk8w\" (UniqueName: \"kubernetes.io/projected/8d021220-ab5c-4d9c-9b7a-dd2248121353-kube-api-access-smk8w\") pod \"8d021220-ab5c-4d9c-9b7a-dd2248121353\" (UID: \"8d021220-ab5c-4d9c-9b7a-dd2248121353\") " Nov 22 10:58:09 crc kubenswrapper[4926]: I1122 10:58:09.971546 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8d021220-ab5c-4d9c-9b7a-dd2248121353-scripts\") pod \"8d021220-ab5c-4d9c-9b7a-dd2248121353\" (UID: \"8d021220-ab5c-4d9c-9b7a-dd2248121353\") " Nov 22 10:58:09 crc kubenswrapper[4926]: I1122 10:58:09.971684 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/8d021220-ab5c-4d9c-9b7a-dd2248121353-run-httpd\") pod \"8d021220-ab5c-4d9c-9b7a-dd2248121353\" (UID: \"8d021220-ab5c-4d9c-9b7a-dd2248121353\") " Nov 22 10:58:09 crc kubenswrapper[4926]: I1122 10:58:09.971744 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8d021220-ab5c-4d9c-9b7a-dd2248121353-combined-ca-bundle\") pod \"8d021220-ab5c-4d9c-9b7a-dd2248121353\" (UID: \"8d021220-ab5c-4d9c-9b7a-dd2248121353\") " Nov 22 10:58:09 crc kubenswrapper[4926]: I1122 10:58:09.971779 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/8d021220-ab5c-4d9c-9b7a-dd2248121353-log-httpd\") pod \"8d021220-ab5c-4d9c-9b7a-dd2248121353\" (UID: \"8d021220-ab5c-4d9c-9b7a-dd2248121353\") " Nov 22 10:58:09 crc kubenswrapper[4926]: I1122 10:58:09.972609 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8d021220-ab5c-4d9c-9b7a-dd2248121353-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "8d021220-ab5c-4d9c-9b7a-dd2248121353" (UID: "8d021220-ab5c-4d9c-9b7a-dd2248121353"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 10:58:09 crc kubenswrapper[4926]: I1122 10:58:09.973131 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8d021220-ab5c-4d9c-9b7a-dd2248121353-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "8d021220-ab5c-4d9c-9b7a-dd2248121353" (UID: "8d021220-ab5c-4d9c-9b7a-dd2248121353"). InnerVolumeSpecName "run-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 10:58:09 crc kubenswrapper[4926]: I1122 10:58:09.990862 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8d021220-ab5c-4d9c-9b7a-dd2248121353-scripts" (OuterVolumeSpecName: "scripts") pod "8d021220-ab5c-4d9c-9b7a-dd2248121353" (UID: "8d021220-ab5c-4d9c-9b7a-dd2248121353"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:58:09 crc kubenswrapper[4926]: I1122 10:58:09.991237 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8d021220-ab5c-4d9c-9b7a-dd2248121353-kube-api-access-smk8w" (OuterVolumeSpecName: "kube-api-access-smk8w") pod "8d021220-ab5c-4d9c-9b7a-dd2248121353" (UID: "8d021220-ab5c-4d9c-9b7a-dd2248121353"). InnerVolumeSpecName "kube-api-access-smk8w". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:58:10 crc kubenswrapper[4926]: I1122 10:58:10.004834 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8d021220-ab5c-4d9c-9b7a-dd2248121353-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "8d021220-ab5c-4d9c-9b7a-dd2248121353" (UID: "8d021220-ab5c-4d9c-9b7a-dd2248121353"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:58:10 crc kubenswrapper[4926]: I1122 10:58:10.070719 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8d021220-ab5c-4d9c-9b7a-dd2248121353-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "8d021220-ab5c-4d9c-9b7a-dd2248121353" (UID: "8d021220-ab5c-4d9c-9b7a-dd2248121353"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:58:10 crc kubenswrapper[4926]: I1122 10:58:10.074093 4926 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8d021220-ab5c-4d9c-9b7a-dd2248121353-scripts\") on node \"crc\" DevicePath \"\"" Nov 22 10:58:10 crc kubenswrapper[4926]: I1122 10:58:10.074122 4926 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/8d021220-ab5c-4d9c-9b7a-dd2248121353-run-httpd\") on node \"crc\" DevicePath \"\"" Nov 22 10:58:10 crc kubenswrapper[4926]: I1122 10:58:10.074133 4926 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8d021220-ab5c-4d9c-9b7a-dd2248121353-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 22 10:58:10 crc kubenswrapper[4926]: I1122 10:58:10.074146 4926 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/8d021220-ab5c-4d9c-9b7a-dd2248121353-log-httpd\") on node \"crc\" DevicePath \"\"" Nov 22 10:58:10 crc kubenswrapper[4926]: I1122 10:58:10.074158 4926 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/8d021220-ab5c-4d9c-9b7a-dd2248121353-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Nov 22 10:58:10 crc kubenswrapper[4926]: I1122 10:58:10.074172 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-smk8w\" (UniqueName: \"kubernetes.io/projected/8d021220-ab5c-4d9c-9b7a-dd2248121353-kube-api-access-smk8w\") on node \"crc\" DevicePath \"\"" Nov 22 10:58:10 crc kubenswrapper[4926]: I1122 10:58:10.092202 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for 
volume "kubernetes.io/secret/8d021220-ab5c-4d9c-9b7a-dd2248121353-config-data" (OuterVolumeSpecName: "config-data") pod "8d021220-ab5c-4d9c-9b7a-dd2248121353" (UID: "8d021220-ab5c-4d9c-9b7a-dd2248121353"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:58:10 crc kubenswrapper[4926]: I1122 10:58:10.176710 4926 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8d021220-ab5c-4d9c-9b7a-dd2248121353-config-data\") on node \"crc\" DevicePath \"\"" Nov 22 10:58:10 crc kubenswrapper[4926]: I1122 10:58:10.605052 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b85c12f2-c589-476e-94b3-cc7650c154eb" path="/var/lib/kubelet/pods/b85c12f2-c589-476e-94b3-cc7650c154eb/volumes" Nov 22 10:58:10 crc kubenswrapper[4926]: I1122 10:58:10.605669 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="dc8c34a7-9a8c-4ae9-afb4-eebee493f602" path="/var/lib/kubelet/pods/dc8c34a7-9a8c-4ae9-afb4-eebee493f602/volumes" Nov 22 10:58:10 crc kubenswrapper[4926]: I1122 10:58:10.818174 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"8d021220-ab5c-4d9c-9b7a-dd2248121353","Type":"ContainerDied","Data":"16f6ec5138e15730f597da51e53216f45abb5c1c78d9852df631d8dfd843e404"} Nov 22 10:58:10 crc kubenswrapper[4926]: I1122 10:58:10.818230 4926 scope.go:117] "RemoveContainer" containerID="f12c7d0fbc318c22fd3030bae7cb7292e6b7fe85d2883d8289ed56c195aa7aff" Nov 22 10:58:10 crc kubenswrapper[4926]: I1122 10:58:10.818349 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 22 10:58:10 crc kubenswrapper[4926]: I1122 10:58:10.869079 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 22 10:58:10 crc kubenswrapper[4926]: I1122 10:58:10.882390 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Nov 22 10:58:10 crc kubenswrapper[4926]: I1122 10:58:10.910944 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 22 10:58:10 crc kubenswrapper[4926]: E1122 10:58:10.911581 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8d021220-ab5c-4d9c-9b7a-dd2248121353" containerName="sg-core" Nov 22 10:58:10 crc kubenswrapper[4926]: I1122 10:58:10.911602 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="8d021220-ab5c-4d9c-9b7a-dd2248121353" containerName="sg-core" Nov 22 10:58:10 crc kubenswrapper[4926]: E1122 10:58:10.911626 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b85c12f2-c589-476e-94b3-cc7650c154eb" containerName="barbican-api" Nov 22 10:58:10 crc kubenswrapper[4926]: I1122 10:58:10.911633 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="b85c12f2-c589-476e-94b3-cc7650c154eb" containerName="barbican-api" Nov 22 10:58:10 crc kubenswrapper[4926]: E1122 10:58:10.911652 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8d021220-ab5c-4d9c-9b7a-dd2248121353" containerName="ceilometer-notification-agent" Nov 22 10:58:10 crc kubenswrapper[4926]: I1122 10:58:10.911659 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="8d021220-ab5c-4d9c-9b7a-dd2248121353" containerName="ceilometer-notification-agent" Nov 22 10:58:10 crc kubenswrapper[4926]: E1122 10:58:10.911677 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8d021220-ab5c-4d9c-9b7a-dd2248121353" containerName="proxy-httpd" Nov 22 
10:58:10 crc kubenswrapper[4926]: I1122 10:58:10.911683 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="8d021220-ab5c-4d9c-9b7a-dd2248121353" containerName="proxy-httpd" Nov 22 10:58:10 crc kubenswrapper[4926]: E1122 10:58:10.911700 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b85c12f2-c589-476e-94b3-cc7650c154eb" containerName="barbican-api-log" Nov 22 10:58:10 crc kubenswrapper[4926]: I1122 10:58:10.911707 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="b85c12f2-c589-476e-94b3-cc7650c154eb" containerName="barbican-api-log" Nov 22 10:58:10 crc kubenswrapper[4926]: E1122 10:58:10.911717 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dc8c34a7-9a8c-4ae9-afb4-eebee493f602" containerName="neutron-api" Nov 22 10:58:10 crc kubenswrapper[4926]: I1122 10:58:10.911724 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="dc8c34a7-9a8c-4ae9-afb4-eebee493f602" containerName="neutron-api" Nov 22 10:58:10 crc kubenswrapper[4926]: E1122 10:58:10.911736 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8d021220-ab5c-4d9c-9b7a-dd2248121353" containerName="ceilometer-central-agent" Nov 22 10:58:10 crc kubenswrapper[4926]: I1122 10:58:10.911742 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="8d021220-ab5c-4d9c-9b7a-dd2248121353" containerName="ceilometer-central-agent" Nov 22 10:58:10 crc kubenswrapper[4926]: E1122 10:58:10.911772 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dc8c34a7-9a8c-4ae9-afb4-eebee493f602" containerName="neutron-httpd" Nov 22 10:58:10 crc kubenswrapper[4926]: I1122 10:58:10.911781 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="dc8c34a7-9a8c-4ae9-afb4-eebee493f602" containerName="neutron-httpd" Nov 22 10:58:10 crc kubenswrapper[4926]: I1122 10:58:10.912114 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="8d021220-ab5c-4d9c-9b7a-dd2248121353" containerName="sg-core" Nov 22 10:58:10 crc kubenswrapper[4926]: I1122 10:58:10.912144 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="dc8c34a7-9a8c-4ae9-afb4-eebee493f602" containerName="neutron-httpd" Nov 22 10:58:10 crc kubenswrapper[4926]: I1122 10:58:10.912155 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="b85c12f2-c589-476e-94b3-cc7650c154eb" containerName="barbican-api" Nov 22 10:58:10 crc kubenswrapper[4926]: I1122 10:58:10.912172 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="8d021220-ab5c-4d9c-9b7a-dd2248121353" containerName="proxy-httpd" Nov 22 10:58:10 crc kubenswrapper[4926]: I1122 10:58:10.912188 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="8d021220-ab5c-4d9c-9b7a-dd2248121353" containerName="ceilometer-notification-agent" Nov 22 10:58:10 crc kubenswrapper[4926]: I1122 10:58:10.912204 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="dc8c34a7-9a8c-4ae9-afb4-eebee493f602" containerName="neutron-api" Nov 22 10:58:10 crc kubenswrapper[4926]: I1122 10:58:10.912221 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="8d021220-ab5c-4d9c-9b7a-dd2248121353" containerName="ceilometer-central-agent" Nov 22 10:58:10 crc kubenswrapper[4926]: I1122 10:58:10.912238 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="b85c12f2-c589-476e-94b3-cc7650c154eb" containerName="barbican-api-log" Nov 22 10:58:10 crc kubenswrapper[4926]: I1122 10:58:10.923670 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] 
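The cpu_manager.go:410 and state_mem.go:107 pairs above show the kubelet sweeping per-container CPU and memory assignments that still reference the deleted pod UIDs (8d021220…, b85c12f2…, dc8c34a7…) before it admits the replacement openstack/ceilometer-0 pod under its new UID (cf1674fb…). What follows is a minimal Go sketch of that kind of stale-state sweep, not the kubelet's actual implementation in pkg/kubelet/cm; containerKey, staleStateCleaner, removeStale, and activePods are illustrative names.

package main

import "fmt"

// containerKey identifies one container's resource assignment,
// mirroring the podUID/containerName pair in each log entry.
type containerKey struct {
	podUID        string
	containerName string
}

// staleStateCleaner holds toy per-container assignments
// (e.g. a CPU set string) keyed by pod UID and container name.
type staleStateCleaner struct {
	assignments map[containerKey]string
}

// removeStale drops every assignment whose pod UID is no longer
// active, analogous to the paired "RemoveStaleState: removing
// container" / "Deleted CPUSet assignment" lines in the log.
func (s *staleStateCleaner) removeStale(activePods map[string]bool) {
	for key := range s.assignments {
		if !activePods[key.podUID] {
			fmt.Printf("RemoveStaleState: removing container pod=%s container=%s\n",
				key.podUID, key.containerName)
			delete(s.assignments, key) // deleting while ranging is safe in Go
		}
	}
}

func main() {
	s := &staleStateCleaner{assignments: map[containerKey]string{
		// Old ceilometer-0 pod UID, taken from the log above.
		{"8d021220-ab5c-4d9c-9b7a-dd2248121353", "sg-core"}: "0-3",
		// Replacement ceilometer-0 pod UID, still active.
		{"cf1674fb-d82f-4bb2-9066-b51d000d311b", "proxy-httpd"}: "0-3",
	}}
	// Only the replacement pod survives the sweep.
	s.removeStale(map[string]bool{"cf1674fb-d82f-4bb2-9066-b51d000d311b": true})
}

Note how in the log each E-level "RemoveStaleState: removing container" entry is immediately followed by an I-level "Deleted CPUSet assignment" (or memory_manager "RemoveStaleState removing state") entry for the same podUID/containerName; that pairing corresponds to the per-entry delete in the sketch.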
Nov 22 10:58:10 crc kubenswrapper[4926]: I1122 10:58:10.923801 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 22 10:58:10 crc kubenswrapper[4926]: I1122 10:58:10.927912 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 22 10:58:10 crc kubenswrapper[4926]: I1122 10:58:10.937145 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 22 10:58:11 crc kubenswrapper[4926]: I1122 10:58:11.001193 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/cf1674fb-d82f-4bb2-9066-b51d000d311b-scripts\") pod \"ceilometer-0\" (UID: \"cf1674fb-d82f-4bb2-9066-b51d000d311b\") " pod="openstack/ceilometer-0" Nov 22 10:58:11 crc kubenswrapper[4926]: I1122 10:58:11.001252 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vssjc\" (UniqueName: \"kubernetes.io/projected/cf1674fb-d82f-4bb2-9066-b51d000d311b-kube-api-access-vssjc\") pod \"ceilometer-0\" (UID: \"cf1674fb-d82f-4bb2-9066-b51d000d311b\") " pod="openstack/ceilometer-0" Nov 22 10:58:11 crc kubenswrapper[4926]: I1122 10:58:11.001283 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cf1674fb-d82f-4bb2-9066-b51d000d311b-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"cf1674fb-d82f-4bb2-9066-b51d000d311b\") " pod="openstack/ceilometer-0" Nov 22 10:58:11 crc kubenswrapper[4926]: I1122 10:58:11.001421 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/cf1674fb-d82f-4bb2-9066-b51d000d311b-run-httpd\") pod \"ceilometer-0\" (UID: \"cf1674fb-d82f-4bb2-9066-b51d000d311b\") " pod="openstack/ceilometer-0" Nov 22 10:58:11 crc kubenswrapper[4926]: I1122 10:58:11.001508 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cf1674fb-d82f-4bb2-9066-b51d000d311b-config-data\") pod \"ceilometer-0\" (UID: \"cf1674fb-d82f-4bb2-9066-b51d000d311b\") " pod="openstack/ceilometer-0" Nov 22 10:58:11 crc kubenswrapper[4926]: I1122 10:58:11.001739 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/cf1674fb-d82f-4bb2-9066-b51d000d311b-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"cf1674fb-d82f-4bb2-9066-b51d000d311b\") " pod="openstack/ceilometer-0" Nov 22 10:58:11 crc kubenswrapper[4926]: I1122 10:58:11.001795 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/cf1674fb-d82f-4bb2-9066-b51d000d311b-log-httpd\") pod \"ceilometer-0\" (UID: \"cf1674fb-d82f-4bb2-9066-b51d000d311b\") " pod="openstack/ceilometer-0" Nov 22 10:58:11 crc kubenswrapper[4926]: I1122 10:58:11.145610 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/cf1674fb-d82f-4bb2-9066-b51d000d311b-scripts\") pod \"ceilometer-0\" (UID: \"cf1674fb-d82f-4bb2-9066-b51d000d311b\") " pod="openstack/ceilometer-0" Nov 22 10:58:11 crc kubenswrapper[4926]: I1122 10:58:11.145706 4926 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"kube-api-access-vssjc\" (UniqueName: \"kubernetes.io/projected/cf1674fb-d82f-4bb2-9066-b51d000d311b-kube-api-access-vssjc\") pod \"ceilometer-0\" (UID: \"cf1674fb-d82f-4bb2-9066-b51d000d311b\") " pod="openstack/ceilometer-0" Nov 22 10:58:11 crc kubenswrapper[4926]: I1122 10:58:11.145752 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cf1674fb-d82f-4bb2-9066-b51d000d311b-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"cf1674fb-d82f-4bb2-9066-b51d000d311b\") " pod="openstack/ceilometer-0" Nov 22 10:58:11 crc kubenswrapper[4926]: I1122 10:58:11.145782 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/cf1674fb-d82f-4bb2-9066-b51d000d311b-run-httpd\") pod \"ceilometer-0\" (UID: \"cf1674fb-d82f-4bb2-9066-b51d000d311b\") " pod="openstack/ceilometer-0" Nov 22 10:58:11 crc kubenswrapper[4926]: I1122 10:58:11.145812 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cf1674fb-d82f-4bb2-9066-b51d000d311b-config-data\") pod \"ceilometer-0\" (UID: \"cf1674fb-d82f-4bb2-9066-b51d000d311b\") " pod="openstack/ceilometer-0" Nov 22 10:58:11 crc kubenswrapper[4926]: I1122 10:58:11.145916 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/cf1674fb-d82f-4bb2-9066-b51d000d311b-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"cf1674fb-d82f-4bb2-9066-b51d000d311b\") " pod="openstack/ceilometer-0" Nov 22 10:58:11 crc kubenswrapper[4926]: I1122 10:58:11.145948 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/cf1674fb-d82f-4bb2-9066-b51d000d311b-log-httpd\") pod \"ceilometer-0\" (UID: \"cf1674fb-d82f-4bb2-9066-b51d000d311b\") " pod="openstack/ceilometer-0" Nov 22 10:58:11 crc kubenswrapper[4926]: I1122 10:58:11.146537 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/cf1674fb-d82f-4bb2-9066-b51d000d311b-log-httpd\") pod \"ceilometer-0\" (UID: \"cf1674fb-d82f-4bb2-9066-b51d000d311b\") " pod="openstack/ceilometer-0" Nov 22 10:58:11 crc kubenswrapper[4926]: I1122 10:58:11.150618 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/cf1674fb-d82f-4bb2-9066-b51d000d311b-run-httpd\") pod \"ceilometer-0\" (UID: \"cf1674fb-d82f-4bb2-9066-b51d000d311b\") " pod="openstack/ceilometer-0" Nov 22 10:58:11 crc kubenswrapper[4926]: I1122 10:58:11.158418 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cf1674fb-d82f-4bb2-9066-b51d000d311b-config-data\") pod \"ceilometer-0\" (UID: \"cf1674fb-d82f-4bb2-9066-b51d000d311b\") " pod="openstack/ceilometer-0" Nov 22 10:58:11 crc kubenswrapper[4926]: I1122 10:58:11.158556 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cf1674fb-d82f-4bb2-9066-b51d000d311b-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"cf1674fb-d82f-4bb2-9066-b51d000d311b\") " pod="openstack/ceilometer-0" Nov 22 10:58:11 crc kubenswrapper[4926]: I1122 10:58:11.160509 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/cf1674fb-d82f-4bb2-9066-b51d000d311b-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"cf1674fb-d82f-4bb2-9066-b51d000d311b\") " pod="openstack/ceilometer-0" Nov 22 10:58:11 crc kubenswrapper[4926]: I1122 10:58:11.166450 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vssjc\" (UniqueName: \"kubernetes.io/projected/cf1674fb-d82f-4bb2-9066-b51d000d311b-kube-api-access-vssjc\") pod \"ceilometer-0\" (UID: \"cf1674fb-d82f-4bb2-9066-b51d000d311b\") " pod="openstack/ceilometer-0" Nov 22 10:58:11 crc kubenswrapper[4926]: I1122 10:58:11.171027 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/cf1674fb-d82f-4bb2-9066-b51d000d311b-scripts\") pod \"ceilometer-0\" (UID: \"cf1674fb-d82f-4bb2-9066-b51d000d311b\") " pod="openstack/ceilometer-0" Nov 22 10:58:11 crc kubenswrapper[4926]: I1122 10:58:11.256096 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 22 10:58:12 crc kubenswrapper[4926]: I1122 10:58:12.595434 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8d021220-ab5c-4d9c-9b7a-dd2248121353" path="/var/lib/kubelet/pods/8d021220-ab5c-4d9c-9b7a-dd2248121353/volumes" Nov 22 10:58:14 crc kubenswrapper[4926]: I1122 10:58:14.360976 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/swift-proxy-74969bfb89-zx2cm" Nov 22 10:58:14 crc kubenswrapper[4926]: I1122 10:58:14.361276 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/swift-proxy-74969bfb89-zx2cm" Nov 22 10:58:14 crc kubenswrapper[4926]: I1122 10:58:14.362440 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/cinder-scheduler-0" Nov 22 10:58:15 crc kubenswrapper[4926]: I1122 10:58:15.793772 4926 scope.go:117] "RemoveContainer" containerID="2bf0f11d91c21ac1c621718d3069598344fe2d89df6009d0fcf1fdc951f587cb" Nov 22 10:58:15 crc kubenswrapper[4926]: I1122 10:58:15.826698 4926 scope.go:117] "RemoveContainer" containerID="a4302a4e6816e40c9a574135c82c609a1db7db9aa1e6698b6215ce43dfb3893d" Nov 22 10:58:15 crc kubenswrapper[4926]: I1122 10:58:15.894040 4926 scope.go:117] "RemoveContainer" containerID="e057c2c43791991e9165d6cf61d6acbfd66ba63c5966a16d84e7c0c781bef430" Nov 22 10:58:16 crc kubenswrapper[4926]: I1122 10:58:16.301667 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 22 10:58:16 crc kubenswrapper[4926]: I1122 10:58:16.898959 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"cf1674fb-d82f-4bb2-9066-b51d000d311b","Type":"ContainerStarted","Data":"3dc9eee4a6fbe723db0dfda96d4d801af4a4f7121e942cd48ea1b9429e48d6c3"} Nov 22 10:58:16 crc kubenswrapper[4926]: I1122 10:58:16.902167 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstackclient" event={"ID":"6c186926-85fd-4c52-9910-48a3c70ae9eb","Type":"ContainerStarted","Data":"093a1d742e45c329ba3022714d77c6abfa6cebdb919891033a72f2935948bb2e"} Nov 22 10:58:16 crc kubenswrapper[4926]: I1122 10:58:16.922571 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstackclient" podStartSLOduration=2.70151109 podStartE2EDuration="12.92254892s" podCreationTimestamp="2025-11-22 10:58:04 +0000 UTC" firstStartedPulling="2025-11-22 10:58:05.746162645 +0000 UTC m=+1106.047767932" lastFinishedPulling="2025-11-22 
10:58:15.967200475 +0000 UTC m=+1116.268805762" observedRunningTime="2025-11-22 10:58:16.915474278 +0000 UTC m=+1117.217079565" watchObservedRunningTime="2025-11-22 10:58:16.92254892 +0000 UTC m=+1117.224154207" Nov 22 10:58:16 crc kubenswrapper[4926]: I1122 10:58:16.983688 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 22 10:58:17 crc kubenswrapper[4926]: I1122 10:58:17.914996 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"cf1674fb-d82f-4bb2-9066-b51d000d311b","Type":"ContainerStarted","Data":"dfcb66f769575d6206a3db58410926f980a5356d820fc5dcbed5a0cee12978f8"} Nov 22 10:58:17 crc kubenswrapper[4926]: I1122 10:58:17.915517 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"cf1674fb-d82f-4bb2-9066-b51d000d311b","Type":"ContainerStarted","Data":"b45cfb2cded6a8408a46525219be7da1bac3d0473e8ce848b0150bb98f209d98"} Nov 22 10:58:18 crc kubenswrapper[4926]: I1122 10:58:18.077054 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/horizon-67f69cf99d-5jsdr" podUID="0566b619-da0e-49ff-b282-3d2bb8ae4fe6" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.149:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.149:8443: connect: connection refused" Nov 22 10:58:18 crc kubenswrapper[4926]: I1122 10:58:18.077161 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/horizon-67f69cf99d-5jsdr" Nov 22 10:58:18 crc kubenswrapper[4926]: I1122 10:58:18.928447 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"cf1674fb-d82f-4bb2-9066-b51d000d311b","Type":"ContainerStarted","Data":"3499d28b9a03e6e9eb833a25c129e5b769dcef0dc4c668810df0df936ca4ef2a"} Nov 22 10:58:20 crc kubenswrapper[4926]: I1122 10:58:20.949392 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"cf1674fb-d82f-4bb2-9066-b51d000d311b","Type":"ContainerStarted","Data":"e0bf94b2b8eff6634f4c6fd311d65d980d13a23f91463a4ece1e8b76a166cb69"} Nov 22 10:58:20 crc kubenswrapper[4926]: I1122 10:58:20.950336 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Nov 22 10:58:20 crc kubenswrapper[4926]: I1122 10:58:20.949645 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="cf1674fb-d82f-4bb2-9066-b51d000d311b" containerName="ceilometer-central-agent" containerID="cri-o://b45cfb2cded6a8408a46525219be7da1bac3d0473e8ce848b0150bb98f209d98" gracePeriod=30 Nov 22 10:58:20 crc kubenswrapper[4926]: I1122 10:58:20.949704 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="cf1674fb-d82f-4bb2-9066-b51d000d311b" containerName="sg-core" containerID="cri-o://3499d28b9a03e6e9eb833a25c129e5b769dcef0dc4c668810df0df936ca4ef2a" gracePeriod=30 Nov 22 10:58:20 crc kubenswrapper[4926]: I1122 10:58:20.949719 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="cf1674fb-d82f-4bb2-9066-b51d000d311b" containerName="ceilometer-notification-agent" containerID="cri-o://dfcb66f769575d6206a3db58410926f980a5356d820fc5dcbed5a0cee12978f8" gracePeriod=30 Nov 22 10:58:20 crc kubenswrapper[4926]: I1122 10:58:20.949661 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="cf1674fb-d82f-4bb2-9066-b51d000d311b" 
containerName="proxy-httpd" containerID="cri-o://e0bf94b2b8eff6634f4c6fd311d65d980d13a23f91463a4ece1e8b76a166cb69" gracePeriod=30 Nov 22 10:58:20 crc kubenswrapper[4926]: I1122 10:58:20.995785 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=7.402839408 podStartE2EDuration="10.995762138s" podCreationTimestamp="2025-11-22 10:58:10 +0000 UTC" firstStartedPulling="2025-11-22 10:58:16.304526676 +0000 UTC m=+1116.606131963" lastFinishedPulling="2025-11-22 10:58:19.897449406 +0000 UTC m=+1120.199054693" observedRunningTime="2025-11-22 10:58:20.991782484 +0000 UTC m=+1121.293387771" watchObservedRunningTime="2025-11-22 10:58:20.995762138 +0000 UTC m=+1121.297367425" Nov 22 10:58:21 crc kubenswrapper[4926]: I1122 10:58:21.977836 4926 generic.go:334] "Generic (PLEG): container finished" podID="cf1674fb-d82f-4bb2-9066-b51d000d311b" containerID="e0bf94b2b8eff6634f4c6fd311d65d980d13a23f91463a4ece1e8b76a166cb69" exitCode=0 Nov 22 10:58:21 crc kubenswrapper[4926]: I1122 10:58:21.978199 4926 generic.go:334] "Generic (PLEG): container finished" podID="cf1674fb-d82f-4bb2-9066-b51d000d311b" containerID="3499d28b9a03e6e9eb833a25c129e5b769dcef0dc4c668810df0df936ca4ef2a" exitCode=2 Nov 22 10:58:21 crc kubenswrapper[4926]: I1122 10:58:21.978213 4926 generic.go:334] "Generic (PLEG): container finished" podID="cf1674fb-d82f-4bb2-9066-b51d000d311b" containerID="dfcb66f769575d6206a3db58410926f980a5356d820fc5dcbed5a0cee12978f8" exitCode=0 Nov 22 10:58:21 crc kubenswrapper[4926]: I1122 10:58:21.977940 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"cf1674fb-d82f-4bb2-9066-b51d000d311b","Type":"ContainerDied","Data":"e0bf94b2b8eff6634f4c6fd311d65d980d13a23f91463a4ece1e8b76a166cb69"} Nov 22 10:58:21 crc kubenswrapper[4926]: I1122 10:58:21.978257 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"cf1674fb-d82f-4bb2-9066-b51d000d311b","Type":"ContainerDied","Data":"3499d28b9a03e6e9eb833a25c129e5b769dcef0dc4c668810df0df936ca4ef2a"} Nov 22 10:58:21 crc kubenswrapper[4926]: I1122 10:58:21.978276 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"cf1674fb-d82f-4bb2-9066-b51d000d311b","Type":"ContainerDied","Data":"dfcb66f769575d6206a3db58410926f980a5356d820fc5dcbed5a0cee12978f8"} Nov 22 10:58:22 crc kubenswrapper[4926]: I1122 10:58:22.260440 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-db-create-vnfm7"] Nov 22 10:58:22 crc kubenswrapper[4926]: I1122 10:58:22.261749 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-db-create-vnfm7" Nov 22 10:58:22 crc kubenswrapper[4926]: I1122 10:58:22.300963 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-db-create-vnfm7"] Nov 22 10:58:22 crc kubenswrapper[4926]: I1122 10:58:22.349649 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/098e9322-614e-4c4a-9fca-6479e57018d7-operator-scripts\") pod \"nova-api-db-create-vnfm7\" (UID: \"098e9322-614e-4c4a-9fca-6479e57018d7\") " pod="openstack/nova-api-db-create-vnfm7" Nov 22 10:58:22 crc kubenswrapper[4926]: I1122 10:58:22.349703 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bgt4b\" (UniqueName: \"kubernetes.io/projected/098e9322-614e-4c4a-9fca-6479e57018d7-kube-api-access-bgt4b\") pod \"nova-api-db-create-vnfm7\" (UID: \"098e9322-614e-4c4a-9fca-6479e57018d7\") " pod="openstack/nova-api-db-create-vnfm7" Nov 22 10:58:22 crc kubenswrapper[4926]: I1122 10:58:22.451034 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/098e9322-614e-4c4a-9fca-6479e57018d7-operator-scripts\") pod \"nova-api-db-create-vnfm7\" (UID: \"098e9322-614e-4c4a-9fca-6479e57018d7\") " pod="openstack/nova-api-db-create-vnfm7" Nov 22 10:58:22 crc kubenswrapper[4926]: I1122 10:58:22.451086 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bgt4b\" (UniqueName: \"kubernetes.io/projected/098e9322-614e-4c4a-9fca-6479e57018d7-kube-api-access-bgt4b\") pod \"nova-api-db-create-vnfm7\" (UID: \"098e9322-614e-4c4a-9fca-6479e57018d7\") " pod="openstack/nova-api-db-create-vnfm7" Nov 22 10:58:22 crc kubenswrapper[4926]: I1122 10:58:22.451856 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/098e9322-614e-4c4a-9fca-6479e57018d7-operator-scripts\") pod \"nova-api-db-create-vnfm7\" (UID: \"098e9322-614e-4c4a-9fca-6479e57018d7\") " pod="openstack/nova-api-db-create-vnfm7" Nov 22 10:58:22 crc kubenswrapper[4926]: I1122 10:58:22.465015 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-e910-account-create-update-xt8jl"] Nov 22 10:58:22 crc kubenswrapper[4926]: I1122 10:58:22.466204 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-e910-account-create-update-xt8jl" Nov 22 10:58:22 crc kubenswrapper[4926]: I1122 10:58:22.469815 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-db-secret" Nov 22 10:58:22 crc kubenswrapper[4926]: I1122 10:58:22.471833 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-db-create-wpt7g"] Nov 22 10:58:22 crc kubenswrapper[4926]: I1122 10:58:22.472999 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-db-create-wpt7g" Nov 22 10:58:22 crc kubenswrapper[4926]: I1122 10:58:22.482707 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-e910-account-create-update-xt8jl"] Nov 22 10:58:22 crc kubenswrapper[4926]: I1122 10:58:22.488302 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bgt4b\" (UniqueName: \"kubernetes.io/projected/098e9322-614e-4c4a-9fca-6479e57018d7-kube-api-access-bgt4b\") pod \"nova-api-db-create-vnfm7\" (UID: \"098e9322-614e-4c4a-9fca-6479e57018d7\") " pod="openstack/nova-api-db-create-vnfm7" Nov 22 10:58:22 crc kubenswrapper[4926]: I1122 10:58:22.512947 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-db-create-wpt7g"] Nov 22 10:58:22 crc kubenswrapper[4926]: I1122 10:58:22.559561 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nksdh\" (UniqueName: \"kubernetes.io/projected/9e45e318-c0a2-473e-8390-aaa51821b3f8-kube-api-access-nksdh\") pod \"nova-api-e910-account-create-update-xt8jl\" (UID: \"9e45e318-c0a2-473e-8390-aaa51821b3f8\") " pod="openstack/nova-api-e910-account-create-update-xt8jl" Nov 22 10:58:22 crc kubenswrapper[4926]: I1122 10:58:22.559602 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xftxc\" (UniqueName: \"kubernetes.io/projected/219be0fb-9881-4029-8ab1-03a46157cd21-kube-api-access-xftxc\") pod \"nova-cell0-db-create-wpt7g\" (UID: \"219be0fb-9881-4029-8ab1-03a46157cd21\") " pod="openstack/nova-cell0-db-create-wpt7g" Nov 22 10:58:22 crc kubenswrapper[4926]: I1122 10:58:22.559643 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9e45e318-c0a2-473e-8390-aaa51821b3f8-operator-scripts\") pod \"nova-api-e910-account-create-update-xt8jl\" (UID: \"9e45e318-c0a2-473e-8390-aaa51821b3f8\") " pod="openstack/nova-api-e910-account-create-update-xt8jl" Nov 22 10:58:22 crc kubenswrapper[4926]: I1122 10:58:22.559658 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/219be0fb-9881-4029-8ab1-03a46157cd21-operator-scripts\") pod \"nova-cell0-db-create-wpt7g\" (UID: \"219be0fb-9881-4029-8ab1-03a46157cd21\") " pod="openstack/nova-cell0-db-create-wpt7g" Nov 22 10:58:22 crc kubenswrapper[4926]: I1122 10:58:22.575867 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-db-create-26xp6"] Nov 22 10:58:22 crc kubenswrapper[4926]: I1122 10:58:22.577408 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-db-create-26xp6" Nov 22 10:58:22 crc kubenswrapper[4926]: I1122 10:58:22.581101 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-db-create-vnfm7" Nov 22 10:58:22 crc kubenswrapper[4926]: I1122 10:58:22.599596 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-db-create-26xp6"] Nov 22 10:58:22 crc kubenswrapper[4926]: I1122 10:58:22.661257 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nksdh\" (UniqueName: \"kubernetes.io/projected/9e45e318-c0a2-473e-8390-aaa51821b3f8-kube-api-access-nksdh\") pod \"nova-api-e910-account-create-update-xt8jl\" (UID: \"9e45e318-c0a2-473e-8390-aaa51821b3f8\") " pod="openstack/nova-api-e910-account-create-update-xt8jl" Nov 22 10:58:22 crc kubenswrapper[4926]: I1122 10:58:22.661505 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xftxc\" (UniqueName: \"kubernetes.io/projected/219be0fb-9881-4029-8ab1-03a46157cd21-kube-api-access-xftxc\") pod \"nova-cell0-db-create-wpt7g\" (UID: \"219be0fb-9881-4029-8ab1-03a46157cd21\") " pod="openstack/nova-cell0-db-create-wpt7g" Nov 22 10:58:22 crc kubenswrapper[4926]: I1122 10:58:22.661559 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9e45e318-c0a2-473e-8390-aaa51821b3f8-operator-scripts\") pod \"nova-api-e910-account-create-update-xt8jl\" (UID: \"9e45e318-c0a2-473e-8390-aaa51821b3f8\") " pod="openstack/nova-api-e910-account-create-update-xt8jl" Nov 22 10:58:22 crc kubenswrapper[4926]: I1122 10:58:22.661585 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/219be0fb-9881-4029-8ab1-03a46157cd21-operator-scripts\") pod \"nova-cell0-db-create-wpt7g\" (UID: \"219be0fb-9881-4029-8ab1-03a46157cd21\") " pod="openstack/nova-cell0-db-create-wpt7g" Nov 22 10:58:22 crc kubenswrapper[4926]: I1122 10:58:22.662303 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9e45e318-c0a2-473e-8390-aaa51821b3f8-operator-scripts\") pod \"nova-api-e910-account-create-update-xt8jl\" (UID: \"9e45e318-c0a2-473e-8390-aaa51821b3f8\") " pod="openstack/nova-api-e910-account-create-update-xt8jl" Nov 22 10:58:22 crc kubenswrapper[4926]: I1122 10:58:22.662583 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/219be0fb-9881-4029-8ab1-03a46157cd21-operator-scripts\") pod \"nova-cell0-db-create-wpt7g\" (UID: \"219be0fb-9881-4029-8ab1-03a46157cd21\") " pod="openstack/nova-cell0-db-create-wpt7g" Nov 22 10:58:22 crc kubenswrapper[4926]: I1122 10:58:22.678714 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nksdh\" (UniqueName: \"kubernetes.io/projected/9e45e318-c0a2-473e-8390-aaa51821b3f8-kube-api-access-nksdh\") pod \"nova-api-e910-account-create-update-xt8jl\" (UID: \"9e45e318-c0a2-473e-8390-aaa51821b3f8\") " pod="openstack/nova-api-e910-account-create-update-xt8jl" Nov 22 10:58:22 crc kubenswrapper[4926]: I1122 10:58:22.679632 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xftxc\" (UniqueName: \"kubernetes.io/projected/219be0fb-9881-4029-8ab1-03a46157cd21-kube-api-access-xftxc\") pod \"nova-cell0-db-create-wpt7g\" (UID: \"219be0fb-9881-4029-8ab1-03a46157cd21\") " pod="openstack/nova-cell0-db-create-wpt7g" Nov 22 10:58:22 crc kubenswrapper[4926]: I1122 10:58:22.762822 4926 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d09ba1b7-6aa2-4c58-a281-1cee30d6c27d-operator-scripts\") pod \"nova-cell1-db-create-26xp6\" (UID: \"d09ba1b7-6aa2-4c58-a281-1cee30d6c27d\") " pod="openstack/nova-cell1-db-create-26xp6" Nov 22 10:58:22 crc kubenswrapper[4926]: I1122 10:58:22.762970 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-92blt\" (UniqueName: \"kubernetes.io/projected/d09ba1b7-6aa2-4c58-a281-1cee30d6c27d-kube-api-access-92blt\") pod \"nova-cell1-db-create-26xp6\" (UID: \"d09ba1b7-6aa2-4c58-a281-1cee30d6c27d\") " pod="openstack/nova-cell1-db-create-26xp6" Nov 22 10:58:22 crc kubenswrapper[4926]: I1122 10:58:22.766000 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-c378-account-create-update-8k259"] Nov 22 10:58:22 crc kubenswrapper[4926]: I1122 10:58:22.770334 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-c378-account-create-update-8k259" Nov 22 10:58:22 crc kubenswrapper[4926]: I1122 10:58:22.775774 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-db-secret" Nov 22 10:58:22 crc kubenswrapper[4926]: I1122 10:58:22.790248 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-e910-account-create-update-xt8jl" Nov 22 10:58:22 crc kubenswrapper[4926]: I1122 10:58:22.793129 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-c378-account-create-update-8k259"] Nov 22 10:58:22 crc kubenswrapper[4926]: I1122 10:58:22.865977 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/30ad455a-8882-418d-ac1f-4ba80ba554af-operator-scripts\") pod \"nova-cell0-c378-account-create-update-8k259\" (UID: \"30ad455a-8882-418d-ac1f-4ba80ba554af\") " pod="openstack/nova-cell0-c378-account-create-update-8k259" Nov 22 10:58:22 crc kubenswrapper[4926]: I1122 10:58:22.866321 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d09ba1b7-6aa2-4c58-a281-1cee30d6c27d-operator-scripts\") pod \"nova-cell1-db-create-26xp6\" (UID: \"d09ba1b7-6aa2-4c58-a281-1cee30d6c27d\") " pod="openstack/nova-cell1-db-create-26xp6" Nov 22 10:58:22 crc kubenswrapper[4926]: I1122 10:58:22.866372 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-92blt\" (UniqueName: \"kubernetes.io/projected/d09ba1b7-6aa2-4c58-a281-1cee30d6c27d-kube-api-access-92blt\") pod \"nova-cell1-db-create-26xp6\" (UID: \"d09ba1b7-6aa2-4c58-a281-1cee30d6c27d\") " pod="openstack/nova-cell1-db-create-26xp6" Nov 22 10:58:22 crc kubenswrapper[4926]: I1122 10:58:22.866405 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q5fpp\" (UniqueName: \"kubernetes.io/projected/30ad455a-8882-418d-ac1f-4ba80ba554af-kube-api-access-q5fpp\") pod \"nova-cell0-c378-account-create-update-8k259\" (UID: \"30ad455a-8882-418d-ac1f-4ba80ba554af\") " pod="openstack/nova-cell0-c378-account-create-update-8k259" Nov 22 10:58:22 crc kubenswrapper[4926]: I1122 10:58:22.867176 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: 
\"kubernetes.io/configmap/d09ba1b7-6aa2-4c58-a281-1cee30d6c27d-operator-scripts\") pod \"nova-cell1-db-create-26xp6\" (UID: \"d09ba1b7-6aa2-4c58-a281-1cee30d6c27d\") " pod="openstack/nova-cell1-db-create-26xp6" Nov 22 10:58:22 crc kubenswrapper[4926]: I1122 10:58:22.867616 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-adc5-account-create-update-nk5jm"] Nov 22 10:58:22 crc kubenswrapper[4926]: I1122 10:58:22.867876 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-db-create-wpt7g" Nov 22 10:58:22 crc kubenswrapper[4926]: I1122 10:58:22.871249 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-adc5-account-create-update-nk5jm" Nov 22 10:58:22 crc kubenswrapper[4926]: I1122 10:58:22.882241 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-db-secret" Nov 22 10:58:22 crc kubenswrapper[4926]: I1122 10:58:22.892327 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-92blt\" (UniqueName: \"kubernetes.io/projected/d09ba1b7-6aa2-4c58-a281-1cee30d6c27d-kube-api-access-92blt\") pod \"nova-cell1-db-create-26xp6\" (UID: \"d09ba1b7-6aa2-4c58-a281-1cee30d6c27d\") " pod="openstack/nova-cell1-db-create-26xp6" Nov 22 10:58:22 crc kubenswrapper[4926]: I1122 10:58:22.894928 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-adc5-account-create-update-nk5jm"] Nov 22 10:58:22 crc kubenswrapper[4926]: I1122 10:58:22.901844 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-db-create-26xp6" Nov 22 10:58:22 crc kubenswrapper[4926]: I1122 10:58:22.968385 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/30ad455a-8882-418d-ac1f-4ba80ba554af-operator-scripts\") pod \"nova-cell0-c378-account-create-update-8k259\" (UID: \"30ad455a-8882-418d-ac1f-4ba80ba554af\") " pod="openstack/nova-cell0-c378-account-create-update-8k259" Nov 22 10:58:22 crc kubenswrapper[4926]: I1122 10:58:22.968558 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2gnpj\" (UniqueName: \"kubernetes.io/projected/b2c9d3cf-6437-4b11-95b0-437c2d2eed0b-kube-api-access-2gnpj\") pod \"nova-cell1-adc5-account-create-update-nk5jm\" (UID: \"b2c9d3cf-6437-4b11-95b0-437c2d2eed0b\") " pod="openstack/nova-cell1-adc5-account-create-update-nk5jm" Nov 22 10:58:22 crc kubenswrapper[4926]: I1122 10:58:22.968594 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b2c9d3cf-6437-4b11-95b0-437c2d2eed0b-operator-scripts\") pod \"nova-cell1-adc5-account-create-update-nk5jm\" (UID: \"b2c9d3cf-6437-4b11-95b0-437c2d2eed0b\") " pod="openstack/nova-cell1-adc5-account-create-update-nk5jm" Nov 22 10:58:22 crc kubenswrapper[4926]: I1122 10:58:22.968642 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q5fpp\" (UniqueName: \"kubernetes.io/projected/30ad455a-8882-418d-ac1f-4ba80ba554af-kube-api-access-q5fpp\") pod \"nova-cell0-c378-account-create-update-8k259\" (UID: \"30ad455a-8882-418d-ac1f-4ba80ba554af\") " pod="openstack/nova-cell0-c378-account-create-update-8k259" Nov 22 10:58:22 crc kubenswrapper[4926]: I1122 10:58:22.969727 4926 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/30ad455a-8882-418d-ac1f-4ba80ba554af-operator-scripts\") pod \"nova-cell0-c378-account-create-update-8k259\" (UID: \"30ad455a-8882-418d-ac1f-4ba80ba554af\") " pod="openstack/nova-cell0-c378-account-create-update-8k259" Nov 22 10:58:22 crc kubenswrapper[4926]: I1122 10:58:22.985756 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-q5fpp\" (UniqueName: \"kubernetes.io/projected/30ad455a-8882-418d-ac1f-4ba80ba554af-kube-api-access-q5fpp\") pod \"nova-cell0-c378-account-create-update-8k259\" (UID: \"30ad455a-8882-418d-ac1f-4ba80ba554af\") " pod="openstack/nova-cell0-c378-account-create-update-8k259" Nov 22 10:58:23 crc kubenswrapper[4926]: I1122 10:58:23.071164 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2gnpj\" (UniqueName: \"kubernetes.io/projected/b2c9d3cf-6437-4b11-95b0-437c2d2eed0b-kube-api-access-2gnpj\") pod \"nova-cell1-adc5-account-create-update-nk5jm\" (UID: \"b2c9d3cf-6437-4b11-95b0-437c2d2eed0b\") " pod="openstack/nova-cell1-adc5-account-create-update-nk5jm" Nov 22 10:58:23 crc kubenswrapper[4926]: I1122 10:58:23.071221 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b2c9d3cf-6437-4b11-95b0-437c2d2eed0b-operator-scripts\") pod \"nova-cell1-adc5-account-create-update-nk5jm\" (UID: \"b2c9d3cf-6437-4b11-95b0-437c2d2eed0b\") " pod="openstack/nova-cell1-adc5-account-create-update-nk5jm" Nov 22 10:58:23 crc kubenswrapper[4926]: I1122 10:58:23.072244 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b2c9d3cf-6437-4b11-95b0-437c2d2eed0b-operator-scripts\") pod \"nova-cell1-adc5-account-create-update-nk5jm\" (UID: \"b2c9d3cf-6437-4b11-95b0-437c2d2eed0b\") " pod="openstack/nova-cell1-adc5-account-create-update-nk5jm" Nov 22 10:58:23 crc kubenswrapper[4926]: I1122 10:58:23.091371 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-db-create-vnfm7"] Nov 22 10:58:23 crc kubenswrapper[4926]: I1122 10:58:23.092202 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2gnpj\" (UniqueName: \"kubernetes.io/projected/b2c9d3cf-6437-4b11-95b0-437c2d2eed0b-kube-api-access-2gnpj\") pod \"nova-cell1-adc5-account-create-update-nk5jm\" (UID: \"b2c9d3cf-6437-4b11-95b0-437c2d2eed0b\") " pod="openstack/nova-cell1-adc5-account-create-update-nk5jm" Nov 22 10:58:23 crc kubenswrapper[4926]: I1122 10:58:23.109594 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-c378-account-create-update-8k259" Nov 22 10:58:23 crc kubenswrapper[4926]: I1122 10:58:23.215243 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-adc5-account-create-update-nk5jm" Nov 22 10:58:23 crc kubenswrapper[4926]: I1122 10:58:23.319630 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-e910-account-create-update-xt8jl"] Nov 22 10:58:23 crc kubenswrapper[4926]: W1122 10:58:23.321311 4926 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod9e45e318_c0a2_473e_8390_aaa51821b3f8.slice/crio-8bbd8663c1de13072006425f5340e5833c2939cdb242f69933cf7a43bb4e7c00 WatchSource:0}: Error finding container 8bbd8663c1de13072006425f5340e5833c2939cdb242f69933cf7a43bb4e7c00: Status 404 returned error can't find the container with id 8bbd8663c1de13072006425f5340e5833c2939cdb242f69933cf7a43bb4e7c00 Nov 22 10:58:23 crc kubenswrapper[4926]: I1122 10:58:23.456046 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-db-create-wpt7g"] Nov 22 10:58:23 crc kubenswrapper[4926]: I1122 10:58:23.480189 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-db-create-26xp6"] Nov 22 10:58:23 crc kubenswrapper[4926]: W1122 10:58:23.492135 4926 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd09ba1b7_6aa2_4c58_a281_1cee30d6c27d.slice/crio-167a177cd9ca7ac27f8c6cc422e73ca960c058bcc67f7362f81fe74d81d97a8f WatchSource:0}: Error finding container 167a177cd9ca7ac27f8c6cc422e73ca960c058bcc67f7362f81fe74d81d97a8f: Status 404 returned error can't find the container with id 167a177cd9ca7ac27f8c6cc422e73ca960c058bcc67f7362f81fe74d81d97a8f Nov 22 10:58:23 crc kubenswrapper[4926]: I1122 10:58:23.649054 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-c378-account-create-update-8k259"] Nov 22 10:58:23 crc kubenswrapper[4926]: W1122 10:58:23.664999 4926 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod30ad455a_8882_418d_ac1f_4ba80ba554af.slice/crio-5621937a9b6c33c1ab4e376bbca1e8700f43d3fb273848cd2c99bd079a374150 WatchSource:0}: Error finding container 5621937a9b6c33c1ab4e376bbca1e8700f43d3fb273848cd2c99bd079a374150: Status 404 returned error can't find the container with id 5621937a9b6c33c1ab4e376bbca1e8700f43d3fb273848cd2c99bd079a374150 Nov 22 10:58:23 crc kubenswrapper[4926]: I1122 10:58:23.801586 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-adc5-account-create-update-nk5jm"] Nov 22 10:58:23 crc kubenswrapper[4926]: W1122 10:58:23.805003 4926 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb2c9d3cf_6437_4b11_95b0_437c2d2eed0b.slice/crio-cb173634c43bb48116a31f68ef5ceca1c25f8aa0d68410a291645f0e336d7b76 WatchSource:0}: Error finding container cb173634c43bb48116a31f68ef5ceca1c25f8aa0d68410a291645f0e336d7b76: Status 404 returned error can't find the container with id cb173634c43bb48116a31f68ef5ceca1c25f8aa0d68410a291645f0e336d7b76 Nov 22 10:58:24 crc kubenswrapper[4926]: I1122 10:58:24.001239 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-e910-account-create-update-xt8jl" event={"ID":"9e45e318-c0a2-473e-8390-aaa51821b3f8","Type":"ContainerStarted","Data":"f7e428f06abb5ca1a34c54b719c039beedcb72b7a3cdef3bc77141e8798cd2de"} Nov 22 10:58:24 crc kubenswrapper[4926]: I1122 10:58:24.001595 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/nova-api-e910-account-create-update-xt8jl" event={"ID":"9e45e318-c0a2-473e-8390-aaa51821b3f8","Type":"ContainerStarted","Data":"8bbd8663c1de13072006425f5340e5833c2939cdb242f69933cf7a43bb4e7c00"} Nov 22 10:58:24 crc kubenswrapper[4926]: I1122 10:58:24.003309 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-26xp6" event={"ID":"d09ba1b7-6aa2-4c58-a281-1cee30d6c27d","Type":"ContainerStarted","Data":"167a177cd9ca7ac27f8c6cc422e73ca960c058bcc67f7362f81fe74d81d97a8f"} Nov 22 10:58:24 crc kubenswrapper[4926]: I1122 10:58:24.005065 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-vnfm7" event={"ID":"098e9322-614e-4c4a-9fca-6479e57018d7","Type":"ContainerStarted","Data":"4a1866c8fc59d617f6dd96e52249df564a92df3cd244b642800560f5dfe36561"} Nov 22 10:58:24 crc kubenswrapper[4926]: I1122 10:58:24.005097 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-vnfm7" event={"ID":"098e9322-614e-4c4a-9fca-6479e57018d7","Type":"ContainerStarted","Data":"0c90715c0362fc170e4aa7fbc27c1281ba5ed06e7c8040d50c34631c067248dd"} Nov 22 10:58:24 crc kubenswrapper[4926]: I1122 10:58:24.006119 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-wpt7g" event={"ID":"219be0fb-9881-4029-8ab1-03a46157cd21","Type":"ContainerStarted","Data":"210ac85255e0603599c22525fb63433c7205145a43c49880d1d52979d59adff7"} Nov 22 10:58:24 crc kubenswrapper[4926]: I1122 10:58:24.007431 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-adc5-account-create-update-nk5jm" event={"ID":"b2c9d3cf-6437-4b11-95b0-437c2d2eed0b","Type":"ContainerStarted","Data":"cb173634c43bb48116a31f68ef5ceca1c25f8aa0d68410a291645f0e336d7b76"} Nov 22 10:58:24 crc kubenswrapper[4926]: I1122 10:58:24.008514 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-c378-account-create-update-8k259" event={"ID":"30ad455a-8882-418d-ac1f-4ba80ba554af","Type":"ContainerStarted","Data":"5621937a9b6c33c1ab4e376bbca1e8700f43d3fb273848cd2c99bd079a374150"} Nov 22 10:58:24 crc kubenswrapper[4926]: I1122 10:58:24.945348 4926 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-67f69cf99d-5jsdr" Nov 22 10:58:25 crc kubenswrapper[4926]: I1122 10:58:25.018247 4926 generic.go:334] "Generic (PLEG): container finished" podID="d09ba1b7-6aa2-4c58-a281-1cee30d6c27d" containerID="f7a0c0e8acaff98b4b176b21ff6140648e238268a03e5e05399775af2365981b" exitCode=0 Nov 22 10:58:25 crc kubenswrapper[4926]: I1122 10:58:25.018315 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-26xp6" event={"ID":"d09ba1b7-6aa2-4c58-a281-1cee30d6c27d","Type":"ContainerDied","Data":"f7a0c0e8acaff98b4b176b21ff6140648e238268a03e5e05399775af2365981b"} Nov 22 10:58:25 crc kubenswrapper[4926]: I1122 10:58:25.020040 4926 generic.go:334] "Generic (PLEG): container finished" podID="098e9322-614e-4c4a-9fca-6479e57018d7" containerID="4a1866c8fc59d617f6dd96e52249df564a92df3cd244b642800560f5dfe36561" exitCode=0 Nov 22 10:58:25 crc kubenswrapper[4926]: I1122 10:58:25.020115 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-vnfm7" event={"ID":"098e9322-614e-4c4a-9fca-6479e57018d7","Type":"ContainerDied","Data":"4a1866c8fc59d617f6dd96e52249df564a92df3cd244b642800560f5dfe36561"} Nov 22 10:58:25 crc kubenswrapper[4926]: I1122 10:58:25.021380 4926 generic.go:334] "Generic (PLEG): container finished" podID="219be0fb-9881-4029-8ab1-03a46157cd21" containerID="47a1f2be6d44036898be3920ddd7a45f29eb3b3ddce8d18a7d2b15814ab77f88" exitCode=0 Nov 22 10:58:25 crc kubenswrapper[4926]: I1122 10:58:25.021428 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-wpt7g" event={"ID":"219be0fb-9881-4029-8ab1-03a46157cd21","Type":"ContainerDied","Data":"47a1f2be6d44036898be3920ddd7a45f29eb3b3ddce8d18a7d2b15814ab77f88"} Nov 22 10:58:25 crc kubenswrapper[4926]: I1122 10:58:25.023317 4926 generic.go:334] "Generic (PLEG): container finished" podID="0566b619-da0e-49ff-b282-3d2bb8ae4fe6" containerID="157148725a567def2b2c6e2ae4298b306aa1e25dd9c7f33941bf227f511bdc78" exitCode=137 Nov 22 10:58:25 crc kubenswrapper[4926]: I1122 10:58:25.023386 4926 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-67f69cf99d-5jsdr" Nov 22 10:58:25 crc kubenswrapper[4926]: I1122 10:58:25.023389 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-67f69cf99d-5jsdr" event={"ID":"0566b619-da0e-49ff-b282-3d2bb8ae4fe6","Type":"ContainerDied","Data":"157148725a567def2b2c6e2ae4298b306aa1e25dd9c7f33941bf227f511bdc78"} Nov 22 10:58:25 crc kubenswrapper[4926]: I1122 10:58:25.023521 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-67f69cf99d-5jsdr" event={"ID":"0566b619-da0e-49ff-b282-3d2bb8ae4fe6","Type":"ContainerDied","Data":"e688e9a218a7f4ffba3d9e0461073371f898db61a336aa1faad7da5c0a7172c7"} Nov 22 10:58:25 crc kubenswrapper[4926]: I1122 10:58:25.023552 4926 scope.go:117] "RemoveContainer" containerID="98fc8abd427d22d1871572b84594162b7ae30e6ea712de1713f1013870e32014" Nov 22 10:58:25 crc kubenswrapper[4926]: I1122 10:58:25.025071 4926 generic.go:334] "Generic (PLEG): container finished" podID="b2c9d3cf-6437-4b11-95b0-437c2d2eed0b" containerID="a265a4adc4f734e4f1e63b892e50a1371d9b186c0748c492f323cf616680143b" exitCode=0 Nov 22 10:58:25 crc kubenswrapper[4926]: I1122 10:58:25.025127 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-adc5-account-create-update-nk5jm" event={"ID":"b2c9d3cf-6437-4b11-95b0-437c2d2eed0b","Type":"ContainerDied","Data":"a265a4adc4f734e4f1e63b892e50a1371d9b186c0748c492f323cf616680143b"} Nov 22 10:58:25 crc kubenswrapper[4926]: I1122 10:58:25.027765 4926 generic.go:334] "Generic (PLEG): container finished" podID="30ad455a-8882-418d-ac1f-4ba80ba554af" containerID="51817e95860a44ff6c51a5a514b9d2620d7d561b8222d25088ebf15a8603c5de" exitCode=0 Nov 22 10:58:25 crc kubenswrapper[4926]: I1122 10:58:25.027827 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-c378-account-create-update-8k259" event={"ID":"30ad455a-8882-418d-ac1f-4ba80ba554af","Type":"ContainerDied","Data":"51817e95860a44ff6c51a5a514b9d2620d7d561b8222d25088ebf15a8603c5de"} Nov 22 10:58:25 crc kubenswrapper[4926]: I1122 10:58:25.029981 4926 generic.go:334] "Generic (PLEG): container finished" podID="9e45e318-c0a2-473e-8390-aaa51821b3f8" containerID="f7e428f06abb5ca1a34c54b719c039beedcb72b7a3cdef3bc77141e8798cd2de" exitCode=0 Nov 22 10:58:25 crc kubenswrapper[4926]: I1122 10:58:25.030008 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-e910-account-create-update-xt8jl" event={"ID":"9e45e318-c0a2-473e-8390-aaa51821b3f8","Type":"ContainerDied","Data":"f7e428f06abb5ca1a34c54b719c039beedcb72b7a3cdef3bc77141e8798cd2de"} Nov 22 10:58:25 crc kubenswrapper[4926]: I1122 10:58:25.113280 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/0566b619-da0e-49ff-b282-3d2bb8ae4fe6-horizon-secret-key\") pod \"0566b619-da0e-49ff-b282-3d2bb8ae4fe6\" (UID: \"0566b619-da0e-49ff-b282-3d2bb8ae4fe6\") " Nov 22 10:58:25 crc kubenswrapper[4926]: I1122 10:58:25.113380 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/0566b619-da0e-49ff-b282-3d2bb8ae4fe6-scripts\") pod \"0566b619-da0e-49ff-b282-3d2bb8ae4fe6\" (UID: \"0566b619-da0e-49ff-b282-3d2bb8ae4fe6\") " Nov 22 10:58:25 crc kubenswrapper[4926]: I1122 10:58:25.113447 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/0566b619-da0e-49ff-b282-3d2bb8ae4fe6-combined-ca-bundle\") pod \"0566b619-da0e-49ff-b282-3d2bb8ae4fe6\" (UID: \"0566b619-da0e-49ff-b282-3d2bb8ae4fe6\") " Nov 22 10:58:25 crc kubenswrapper[4926]: I1122 10:58:25.113490 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cpnrt\" (UniqueName: \"kubernetes.io/projected/0566b619-da0e-49ff-b282-3d2bb8ae4fe6-kube-api-access-cpnrt\") pod \"0566b619-da0e-49ff-b282-3d2bb8ae4fe6\" (UID: \"0566b619-da0e-49ff-b282-3d2bb8ae4fe6\") " Nov 22 10:58:25 crc kubenswrapper[4926]: I1122 10:58:25.113521 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0566b619-da0e-49ff-b282-3d2bb8ae4fe6-logs\") pod \"0566b619-da0e-49ff-b282-3d2bb8ae4fe6\" (UID: \"0566b619-da0e-49ff-b282-3d2bb8ae4fe6\") " Nov 22 10:58:25 crc kubenswrapper[4926]: I1122 10:58:25.113547 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/0566b619-da0e-49ff-b282-3d2bb8ae4fe6-horizon-tls-certs\") pod \"0566b619-da0e-49ff-b282-3d2bb8ae4fe6\" (UID: \"0566b619-da0e-49ff-b282-3d2bb8ae4fe6\") " Nov 22 10:58:25 crc kubenswrapper[4926]: I1122 10:58:25.113608 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/0566b619-da0e-49ff-b282-3d2bb8ae4fe6-config-data\") pod \"0566b619-da0e-49ff-b282-3d2bb8ae4fe6\" (UID: \"0566b619-da0e-49ff-b282-3d2bb8ae4fe6\") " Nov 22 10:58:25 crc kubenswrapper[4926]: I1122 10:58:25.114525 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0566b619-da0e-49ff-b282-3d2bb8ae4fe6-logs" (OuterVolumeSpecName: "logs") pod "0566b619-da0e-49ff-b282-3d2bb8ae4fe6" (UID: "0566b619-da0e-49ff-b282-3d2bb8ae4fe6"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 10:58:25 crc kubenswrapper[4926]: I1122 10:58:25.118488 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0566b619-da0e-49ff-b282-3d2bb8ae4fe6-horizon-secret-key" (OuterVolumeSpecName: "horizon-secret-key") pod "0566b619-da0e-49ff-b282-3d2bb8ae4fe6" (UID: "0566b619-da0e-49ff-b282-3d2bb8ae4fe6"). InnerVolumeSpecName "horizon-secret-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:58:25 crc kubenswrapper[4926]: I1122 10:58:25.119572 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0566b619-da0e-49ff-b282-3d2bb8ae4fe6-kube-api-access-cpnrt" (OuterVolumeSpecName: "kube-api-access-cpnrt") pod "0566b619-da0e-49ff-b282-3d2bb8ae4fe6" (UID: "0566b619-da0e-49ff-b282-3d2bb8ae4fe6"). InnerVolumeSpecName "kube-api-access-cpnrt". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:58:25 crc kubenswrapper[4926]: I1122 10:58:25.166438 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0566b619-da0e-49ff-b282-3d2bb8ae4fe6-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "0566b619-da0e-49ff-b282-3d2bb8ae4fe6" (UID: "0566b619-da0e-49ff-b282-3d2bb8ae4fe6"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:58:25 crc kubenswrapper[4926]: I1122 10:58:25.184732 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0566b619-da0e-49ff-b282-3d2bb8ae4fe6-config-data" (OuterVolumeSpecName: "config-data") pod "0566b619-da0e-49ff-b282-3d2bb8ae4fe6" (UID: "0566b619-da0e-49ff-b282-3d2bb8ae4fe6"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:58:25 crc kubenswrapper[4926]: I1122 10:58:25.200075 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0566b619-da0e-49ff-b282-3d2bb8ae4fe6-scripts" (OuterVolumeSpecName: "scripts") pod "0566b619-da0e-49ff-b282-3d2bb8ae4fe6" (UID: "0566b619-da0e-49ff-b282-3d2bb8ae4fe6"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:58:25 crc kubenswrapper[4926]: I1122 10:58:25.201556 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0566b619-da0e-49ff-b282-3d2bb8ae4fe6-horizon-tls-certs" (OuterVolumeSpecName: "horizon-tls-certs") pod "0566b619-da0e-49ff-b282-3d2bb8ae4fe6" (UID: "0566b619-da0e-49ff-b282-3d2bb8ae4fe6"). InnerVolumeSpecName "horizon-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:58:25 crc kubenswrapper[4926]: I1122 10:58:25.207089 4926 scope.go:117] "RemoveContainer" containerID="157148725a567def2b2c6e2ae4298b306aa1e25dd9c7f33941bf227f511bdc78" Nov 22 10:58:25 crc kubenswrapper[4926]: I1122 10:58:25.215153 4926 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/0566b619-da0e-49ff-b282-3d2bb8ae4fe6-scripts\") on node \"crc\" DevicePath \"\"" Nov 22 10:58:25 crc kubenswrapper[4926]: I1122 10:58:25.215180 4926 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0566b619-da0e-49ff-b282-3d2bb8ae4fe6-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 22 10:58:25 crc kubenswrapper[4926]: I1122 10:58:25.215192 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cpnrt\" (UniqueName: \"kubernetes.io/projected/0566b619-da0e-49ff-b282-3d2bb8ae4fe6-kube-api-access-cpnrt\") on node \"crc\" DevicePath \"\"" Nov 22 10:58:25 crc kubenswrapper[4926]: I1122 10:58:25.215201 4926 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0566b619-da0e-49ff-b282-3d2bb8ae4fe6-logs\") on node \"crc\" DevicePath \"\"" Nov 22 10:58:25 crc kubenswrapper[4926]: I1122 10:58:25.215209 4926 reconciler_common.go:293] "Volume detached for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/0566b619-da0e-49ff-b282-3d2bb8ae4fe6-horizon-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 22 10:58:25 crc kubenswrapper[4926]: I1122 10:58:25.215217 4926 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/0566b619-da0e-49ff-b282-3d2bb8ae4fe6-config-data\") on node \"crc\" DevicePath \"\"" Nov 22 10:58:25 crc kubenswrapper[4926]: I1122 10:58:25.215226 4926 reconciler_common.go:293] "Volume detached for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/0566b619-da0e-49ff-b282-3d2bb8ae4fe6-horizon-secret-key\") on node \"crc\" DevicePath \"\"" Nov 22 10:58:25 crc kubenswrapper[4926]: I1122 10:58:25.251288 4926 scope.go:117] "RemoveContainer" 
containerID="98fc8abd427d22d1871572b84594162b7ae30e6ea712de1713f1013870e32014" Nov 22 10:58:25 crc kubenswrapper[4926]: E1122 10:58:25.251838 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"98fc8abd427d22d1871572b84594162b7ae30e6ea712de1713f1013870e32014\": container with ID starting with 98fc8abd427d22d1871572b84594162b7ae30e6ea712de1713f1013870e32014 not found: ID does not exist" containerID="98fc8abd427d22d1871572b84594162b7ae30e6ea712de1713f1013870e32014" Nov 22 10:58:25 crc kubenswrapper[4926]: I1122 10:58:25.251870 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"98fc8abd427d22d1871572b84594162b7ae30e6ea712de1713f1013870e32014"} err="failed to get container status \"98fc8abd427d22d1871572b84594162b7ae30e6ea712de1713f1013870e32014\": rpc error: code = NotFound desc = could not find container \"98fc8abd427d22d1871572b84594162b7ae30e6ea712de1713f1013870e32014\": container with ID starting with 98fc8abd427d22d1871572b84594162b7ae30e6ea712de1713f1013870e32014 not found: ID does not exist" Nov 22 10:58:25 crc kubenswrapper[4926]: I1122 10:58:25.251948 4926 scope.go:117] "RemoveContainer" containerID="157148725a567def2b2c6e2ae4298b306aa1e25dd9c7f33941bf227f511bdc78" Nov 22 10:58:25 crc kubenswrapper[4926]: E1122 10:58:25.252322 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"157148725a567def2b2c6e2ae4298b306aa1e25dd9c7f33941bf227f511bdc78\": container with ID starting with 157148725a567def2b2c6e2ae4298b306aa1e25dd9c7f33941bf227f511bdc78 not found: ID does not exist" containerID="157148725a567def2b2c6e2ae4298b306aa1e25dd9c7f33941bf227f511bdc78" Nov 22 10:58:25 crc kubenswrapper[4926]: I1122 10:58:25.252358 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"157148725a567def2b2c6e2ae4298b306aa1e25dd9c7f33941bf227f511bdc78"} err="failed to get container status \"157148725a567def2b2c6e2ae4298b306aa1e25dd9c7f33941bf227f511bdc78\": rpc error: code = NotFound desc = could not find container \"157148725a567def2b2c6e2ae4298b306aa1e25dd9c7f33941bf227f511bdc78\": container with ID starting with 157148725a567def2b2c6e2ae4298b306aa1e25dd9c7f33941bf227f511bdc78 not found: ID does not exist" Nov 22 10:58:25 crc kubenswrapper[4926]: I1122 10:58:25.359482 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-67f69cf99d-5jsdr"] Nov 22 10:58:25 crc kubenswrapper[4926]: I1122 10:58:25.371146 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/horizon-67f69cf99d-5jsdr"] Nov 22 10:58:26 crc kubenswrapper[4926]: I1122 10:58:26.566548 4926 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-db-create-26xp6" Nov 22 10:58:26 crc kubenswrapper[4926]: I1122 10:58:26.594298 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0566b619-da0e-49ff-b282-3d2bb8ae4fe6" path="/var/lib/kubelet/pods/0566b619-da0e-49ff-b282-3d2bb8ae4fe6/volumes" Nov 22 10:58:26 crc kubenswrapper[4926]: I1122 10:58:26.742377 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-92blt\" (UniqueName: \"kubernetes.io/projected/d09ba1b7-6aa2-4c58-a281-1cee30d6c27d-kube-api-access-92blt\") pod \"d09ba1b7-6aa2-4c58-a281-1cee30d6c27d\" (UID: \"d09ba1b7-6aa2-4c58-a281-1cee30d6c27d\") " Nov 22 10:58:26 crc kubenswrapper[4926]: I1122 10:58:26.742818 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d09ba1b7-6aa2-4c58-a281-1cee30d6c27d-operator-scripts\") pod \"d09ba1b7-6aa2-4c58-a281-1cee30d6c27d\" (UID: \"d09ba1b7-6aa2-4c58-a281-1cee30d6c27d\") " Nov 22 10:58:26 crc kubenswrapper[4926]: I1122 10:58:26.743801 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d09ba1b7-6aa2-4c58-a281-1cee30d6c27d-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "d09ba1b7-6aa2-4c58-a281-1cee30d6c27d" (UID: "d09ba1b7-6aa2-4c58-a281-1cee30d6c27d"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:58:26 crc kubenswrapper[4926]: I1122 10:58:26.753260 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d09ba1b7-6aa2-4c58-a281-1cee30d6c27d-kube-api-access-92blt" (OuterVolumeSpecName: "kube-api-access-92blt") pod "d09ba1b7-6aa2-4c58-a281-1cee30d6c27d" (UID: "d09ba1b7-6aa2-4c58-a281-1cee30d6c27d"). InnerVolumeSpecName "kube-api-access-92blt". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:58:26 crc kubenswrapper[4926]: I1122 10:58:26.845449 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-92blt\" (UniqueName: \"kubernetes.io/projected/d09ba1b7-6aa2-4c58-a281-1cee30d6c27d-kube-api-access-92blt\") on node \"crc\" DevicePath \"\"" Nov 22 10:58:26 crc kubenswrapper[4926]: I1122 10:58:26.845521 4926 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d09ba1b7-6aa2-4c58-a281-1cee30d6c27d-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 22 10:58:26 crc kubenswrapper[4926]: I1122 10:58:26.900373 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-db-create-vnfm7" Nov 22 10:58:26 crc kubenswrapper[4926]: I1122 10:58:26.907533 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-adc5-account-create-update-nk5jm" Nov 22 10:58:26 crc kubenswrapper[4926]: I1122 10:58:26.927483 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-e910-account-create-update-xt8jl" Nov 22 10:58:26 crc kubenswrapper[4926]: I1122 10:58:26.960162 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-c378-account-create-update-8k259" Nov 22 10:58:26 crc kubenswrapper[4926]: I1122 10:58:26.962120 4926 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-db-create-wpt7g" Nov 22 10:58:27 crc kubenswrapper[4926]: I1122 10:58:27.047753 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xftxc\" (UniqueName: \"kubernetes.io/projected/219be0fb-9881-4029-8ab1-03a46157cd21-kube-api-access-xftxc\") pod \"219be0fb-9881-4029-8ab1-03a46157cd21\" (UID: \"219be0fb-9881-4029-8ab1-03a46157cd21\") " Nov 22 10:58:27 crc kubenswrapper[4926]: I1122 10:58:27.048141 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b2c9d3cf-6437-4b11-95b0-437c2d2eed0b-operator-scripts\") pod \"b2c9d3cf-6437-4b11-95b0-437c2d2eed0b\" (UID: \"b2c9d3cf-6437-4b11-95b0-437c2d2eed0b\") " Nov 22 10:58:27 crc kubenswrapper[4926]: I1122 10:58:27.048272 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bgt4b\" (UniqueName: \"kubernetes.io/projected/098e9322-614e-4c4a-9fca-6479e57018d7-kube-api-access-bgt4b\") pod \"098e9322-614e-4c4a-9fca-6479e57018d7\" (UID: \"098e9322-614e-4c4a-9fca-6479e57018d7\") " Nov 22 10:58:27 crc kubenswrapper[4926]: I1122 10:58:27.048439 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/098e9322-614e-4c4a-9fca-6479e57018d7-operator-scripts\") pod \"098e9322-614e-4c4a-9fca-6479e57018d7\" (UID: \"098e9322-614e-4c4a-9fca-6479e57018d7\") " Nov 22 10:58:27 crc kubenswrapper[4926]: I1122 10:58:27.048553 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/30ad455a-8882-418d-ac1f-4ba80ba554af-operator-scripts\") pod \"30ad455a-8882-418d-ac1f-4ba80ba554af\" (UID: \"30ad455a-8882-418d-ac1f-4ba80ba554af\") " Nov 22 10:58:27 crc kubenswrapper[4926]: I1122 10:58:27.048688 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9e45e318-c0a2-473e-8390-aaa51821b3f8-operator-scripts\") pod \"9e45e318-c0a2-473e-8390-aaa51821b3f8\" (UID: \"9e45e318-c0a2-473e-8390-aaa51821b3f8\") " Nov 22 10:58:27 crc kubenswrapper[4926]: I1122 10:58:27.048833 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/219be0fb-9881-4029-8ab1-03a46157cd21-operator-scripts\") pod \"219be0fb-9881-4029-8ab1-03a46157cd21\" (UID: \"219be0fb-9881-4029-8ab1-03a46157cd21\") " Nov 22 10:58:27 crc kubenswrapper[4926]: I1122 10:58:27.048953 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-q5fpp\" (UniqueName: \"kubernetes.io/projected/30ad455a-8882-418d-ac1f-4ba80ba554af-kube-api-access-q5fpp\") pod \"30ad455a-8882-418d-ac1f-4ba80ba554af\" (UID: \"30ad455a-8882-418d-ac1f-4ba80ba554af\") " Nov 22 10:58:27 crc kubenswrapper[4926]: I1122 10:58:27.049074 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nksdh\" (UniqueName: \"kubernetes.io/projected/9e45e318-c0a2-473e-8390-aaa51821b3f8-kube-api-access-nksdh\") pod \"9e45e318-c0a2-473e-8390-aaa51821b3f8\" (UID: \"9e45e318-c0a2-473e-8390-aaa51821b3f8\") " Nov 22 10:58:27 crc kubenswrapper[4926]: I1122 10:58:27.049178 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2gnpj\" (UniqueName: 
\"kubernetes.io/projected/b2c9d3cf-6437-4b11-95b0-437c2d2eed0b-kube-api-access-2gnpj\") pod \"b2c9d3cf-6437-4b11-95b0-437c2d2eed0b\" (UID: \"b2c9d3cf-6437-4b11-95b0-437c2d2eed0b\") " Nov 22 10:58:27 crc kubenswrapper[4926]: I1122 10:58:27.049845 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/098e9322-614e-4c4a-9fca-6479e57018d7-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "098e9322-614e-4c4a-9fca-6479e57018d7" (UID: "098e9322-614e-4c4a-9fca-6479e57018d7"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:58:27 crc kubenswrapper[4926]: I1122 10:58:27.049899 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/219be0fb-9881-4029-8ab1-03a46157cd21-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "219be0fb-9881-4029-8ab1-03a46157cd21" (UID: "219be0fb-9881-4029-8ab1-03a46157cd21"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:58:27 crc kubenswrapper[4926]: I1122 10:58:27.050273 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b2c9d3cf-6437-4b11-95b0-437c2d2eed0b-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "b2c9d3cf-6437-4b11-95b0-437c2d2eed0b" (UID: "b2c9d3cf-6437-4b11-95b0-437c2d2eed0b"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:58:27 crc kubenswrapper[4926]: I1122 10:58:27.050368 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/30ad455a-8882-418d-ac1f-4ba80ba554af-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "30ad455a-8882-418d-ac1f-4ba80ba554af" (UID: "30ad455a-8882-418d-ac1f-4ba80ba554af"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:58:27 crc kubenswrapper[4926]: I1122 10:58:27.051016 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9e45e318-c0a2-473e-8390-aaa51821b3f8-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "9e45e318-c0a2-473e-8390-aaa51821b3f8" (UID: "9e45e318-c0a2-473e-8390-aaa51821b3f8"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:58:27 crc kubenswrapper[4926]: I1122 10:58:27.053016 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/219be0fb-9881-4029-8ab1-03a46157cd21-kube-api-access-xftxc" (OuterVolumeSpecName: "kube-api-access-xftxc") pod "219be0fb-9881-4029-8ab1-03a46157cd21" (UID: "219be0fb-9881-4029-8ab1-03a46157cd21"). InnerVolumeSpecName "kube-api-access-xftxc". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:58:27 crc kubenswrapper[4926]: I1122 10:58:27.055709 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/30ad455a-8882-418d-ac1f-4ba80ba554af-kube-api-access-q5fpp" (OuterVolumeSpecName: "kube-api-access-q5fpp") pod "30ad455a-8882-418d-ac1f-4ba80ba554af" (UID: "30ad455a-8882-418d-ac1f-4ba80ba554af"). InnerVolumeSpecName "kube-api-access-q5fpp". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:58:27 crc kubenswrapper[4926]: I1122 10:58:27.057245 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/098e9322-614e-4c4a-9fca-6479e57018d7-kube-api-access-bgt4b" (OuterVolumeSpecName: "kube-api-access-bgt4b") pod "098e9322-614e-4c4a-9fca-6479e57018d7" (UID: "098e9322-614e-4c4a-9fca-6479e57018d7"). InnerVolumeSpecName "kube-api-access-bgt4b". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:58:27 crc kubenswrapper[4926]: I1122 10:58:27.061258 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b2c9d3cf-6437-4b11-95b0-437c2d2eed0b-kube-api-access-2gnpj" (OuterVolumeSpecName: "kube-api-access-2gnpj") pod "b2c9d3cf-6437-4b11-95b0-437c2d2eed0b" (UID: "b2c9d3cf-6437-4b11-95b0-437c2d2eed0b"). InnerVolumeSpecName "kube-api-access-2gnpj". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:58:27 crc kubenswrapper[4926]: I1122 10:58:27.066665 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9e45e318-c0a2-473e-8390-aaa51821b3f8-kube-api-access-nksdh" (OuterVolumeSpecName: "kube-api-access-nksdh") pod "9e45e318-c0a2-473e-8390-aaa51821b3f8" (UID: "9e45e318-c0a2-473e-8390-aaa51821b3f8"). InnerVolumeSpecName "kube-api-access-nksdh". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:58:27 crc kubenswrapper[4926]: I1122 10:58:27.070838 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-db-create-wpt7g" Nov 22 10:58:27 crc kubenswrapper[4926]: I1122 10:58:27.070871 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-wpt7g" event={"ID":"219be0fb-9881-4029-8ab1-03a46157cd21","Type":"ContainerDied","Data":"210ac85255e0603599c22525fb63433c7205145a43c49880d1d52979d59adff7"} Nov 22 10:58:27 crc kubenswrapper[4926]: I1122 10:58:27.071116 4926 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="210ac85255e0603599c22525fb63433c7205145a43c49880d1d52979d59adff7" Nov 22 10:58:27 crc kubenswrapper[4926]: I1122 10:58:27.073868 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-adc5-account-create-update-nk5jm" event={"ID":"b2c9d3cf-6437-4b11-95b0-437c2d2eed0b","Type":"ContainerDied","Data":"cb173634c43bb48116a31f68ef5ceca1c25f8aa0d68410a291645f0e336d7b76"} Nov 22 10:58:27 crc kubenswrapper[4926]: I1122 10:58:27.073922 4926 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="cb173634c43bb48116a31f68ef5ceca1c25f8aa0d68410a291645f0e336d7b76" Nov 22 10:58:27 crc kubenswrapper[4926]: I1122 10:58:27.073979 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-adc5-account-create-update-nk5jm" Nov 22 10:58:27 crc kubenswrapper[4926]: I1122 10:58:27.083282 4926 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-c378-account-create-update-8k259" Nov 22 10:58:27 crc kubenswrapper[4926]: I1122 10:58:27.083530 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-c378-account-create-update-8k259" event={"ID":"30ad455a-8882-418d-ac1f-4ba80ba554af","Type":"ContainerDied","Data":"5621937a9b6c33c1ab4e376bbca1e8700f43d3fb273848cd2c99bd079a374150"} Nov 22 10:58:27 crc kubenswrapper[4926]: I1122 10:58:27.083641 4926 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="5621937a9b6c33c1ab4e376bbca1e8700f43d3fb273848cd2c99bd079a374150" Nov 22 10:58:27 crc kubenswrapper[4926]: I1122 10:58:27.086976 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-e910-account-create-update-xt8jl" event={"ID":"9e45e318-c0a2-473e-8390-aaa51821b3f8","Type":"ContainerDied","Data":"8bbd8663c1de13072006425f5340e5833c2939cdb242f69933cf7a43bb4e7c00"} Nov 22 10:58:27 crc kubenswrapper[4926]: I1122 10:58:27.087028 4926 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="8bbd8663c1de13072006425f5340e5833c2939cdb242f69933cf7a43bb4e7c00" Nov 22 10:58:27 crc kubenswrapper[4926]: I1122 10:58:27.087109 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-e910-account-create-update-xt8jl" Nov 22 10:58:27 crc kubenswrapper[4926]: I1122 10:58:27.094390 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-26xp6" event={"ID":"d09ba1b7-6aa2-4c58-a281-1cee30d6c27d","Type":"ContainerDied","Data":"167a177cd9ca7ac27f8c6cc422e73ca960c058bcc67f7362f81fe74d81d97a8f"} Nov 22 10:58:27 crc kubenswrapper[4926]: I1122 10:58:27.094433 4926 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="167a177cd9ca7ac27f8c6cc422e73ca960c058bcc67f7362f81fe74d81d97a8f" Nov 22 10:58:27 crc kubenswrapper[4926]: I1122 10:58:27.094406 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-db-create-26xp6" Nov 22 10:58:27 crc kubenswrapper[4926]: I1122 10:58:27.102234 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-vnfm7" event={"ID":"098e9322-614e-4c4a-9fca-6479e57018d7","Type":"ContainerDied","Data":"0c90715c0362fc170e4aa7fbc27c1281ba5ed06e7c8040d50c34631c067248dd"} Nov 22 10:58:27 crc kubenswrapper[4926]: I1122 10:58:27.102285 4926 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="0c90715c0362fc170e4aa7fbc27c1281ba5ed06e7c8040d50c34631c067248dd" Nov 22 10:58:27 crc kubenswrapper[4926]: I1122 10:58:27.102353 4926 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-db-create-vnfm7" Nov 22 10:58:27 crc kubenswrapper[4926]: I1122 10:58:27.107351 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 22 10:58:27 crc kubenswrapper[4926]: I1122 10:58:27.107615 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="59000497-7fb0-496d-afca-21b04b8d59e4" containerName="glance-log" containerID="cri-o://994b59d90e33fa06996f4fe64553849145665a7fd4d10b6135643fc9d1d80af7" gracePeriod=30 Nov 22 10:58:27 crc kubenswrapper[4926]: I1122 10:58:27.107776 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="59000497-7fb0-496d-afca-21b04b8d59e4" containerName="glance-httpd" containerID="cri-o://12f3423e52ab3cffb68485b607504cb63dc198625ad4a3feffed0d22e0439dec" gracePeriod=30 Nov 22 10:58:27 crc kubenswrapper[4926]: I1122 10:58:27.151410 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bgt4b\" (UniqueName: \"kubernetes.io/projected/098e9322-614e-4c4a-9fca-6479e57018d7-kube-api-access-bgt4b\") on node \"crc\" DevicePath \"\"" Nov 22 10:58:27 crc kubenswrapper[4926]: I1122 10:58:27.151447 4926 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/098e9322-614e-4c4a-9fca-6479e57018d7-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 22 10:58:27 crc kubenswrapper[4926]: I1122 10:58:27.151459 4926 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/30ad455a-8882-418d-ac1f-4ba80ba554af-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 22 10:58:27 crc kubenswrapper[4926]: I1122 10:58:27.151470 4926 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9e45e318-c0a2-473e-8390-aaa51821b3f8-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 22 10:58:27 crc kubenswrapper[4926]: I1122 10:58:27.151482 4926 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/219be0fb-9881-4029-8ab1-03a46157cd21-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 22 10:58:27 crc kubenswrapper[4926]: I1122 10:58:27.151492 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-q5fpp\" (UniqueName: \"kubernetes.io/projected/30ad455a-8882-418d-ac1f-4ba80ba554af-kube-api-access-q5fpp\") on node \"crc\" DevicePath \"\"" Nov 22 10:58:27 crc kubenswrapper[4926]: I1122 10:58:27.151502 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nksdh\" (UniqueName: \"kubernetes.io/projected/9e45e318-c0a2-473e-8390-aaa51821b3f8-kube-api-access-nksdh\") on node \"crc\" DevicePath \"\"" Nov 22 10:58:27 crc kubenswrapper[4926]: I1122 10:58:27.151627 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2gnpj\" (UniqueName: \"kubernetes.io/projected/b2c9d3cf-6437-4b11-95b0-437c2d2eed0b-kube-api-access-2gnpj\") on node \"crc\" DevicePath \"\"" Nov 22 10:58:27 crc kubenswrapper[4926]: I1122 10:58:27.151642 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xftxc\" (UniqueName: \"kubernetes.io/projected/219be0fb-9881-4029-8ab1-03a46157cd21-kube-api-access-xftxc\") on node \"crc\" DevicePath \"\"" Nov 22 10:58:27 crc kubenswrapper[4926]: I1122 10:58:27.151651 4926 reconciler_common.go:293] "Volume 
detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b2c9d3cf-6437-4b11-95b0-437c2d2eed0b-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 22 10:58:28 crc kubenswrapper[4926]: I1122 10:58:28.112300 4926 generic.go:334] "Generic (PLEG): container finished" podID="59000497-7fb0-496d-afca-21b04b8d59e4" containerID="994b59d90e33fa06996f4fe64553849145665a7fd4d10b6135643fc9d1d80af7" exitCode=143 Nov 22 10:58:28 crc kubenswrapper[4926]: I1122 10:58:28.112554 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"59000497-7fb0-496d-afca-21b04b8d59e4","Type":"ContainerDied","Data":"994b59d90e33fa06996f4fe64553849145665a7fd4d10b6135643fc9d1d80af7"} Nov 22 10:58:28 crc kubenswrapper[4926]: I1122 10:58:28.611840 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 22 10:58:28 crc kubenswrapper[4926]: I1122 10:58:28.612122 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="32257092-e014-4913-99e3-a92b522301e2" containerName="glance-log" containerID="cri-o://4f890485d458c5bf8bbc0382df133030b3673ee7a83fab546f49103f6b4445fc" gracePeriod=30 Nov 22 10:58:28 crc kubenswrapper[4926]: I1122 10:58:28.612270 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="32257092-e014-4913-99e3-a92b522301e2" containerName="glance-httpd" containerID="cri-o://4ddaf34f404012355ae0eee3d7d70a108e9c3a361e079136abca04e3fe810195" gracePeriod=30 Nov 22 10:58:29 crc kubenswrapper[4926]: I1122 10:58:29.124667 4926 generic.go:334] "Generic (PLEG): container finished" podID="32257092-e014-4913-99e3-a92b522301e2" containerID="4f890485d458c5bf8bbc0382df133030b3673ee7a83fab546f49103f6b4445fc" exitCode=143 Nov 22 10:58:29 crc kubenswrapper[4926]: I1122 10:58:29.124779 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"32257092-e014-4913-99e3-a92b522301e2","Type":"ContainerDied","Data":"4f890485d458c5bf8bbc0382df133030b3673ee7a83fab546f49103f6b4445fc"} Nov 22 10:58:30 crc kubenswrapper[4926]: I1122 10:58:30.749171 4926 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 22 10:58:30 crc kubenswrapper[4926]: I1122 10:58:30.931918 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/59000497-7fb0-496d-afca-21b04b8d59e4-logs\") pod \"59000497-7fb0-496d-afca-21b04b8d59e4\" (UID: \"59000497-7fb0-496d-afca-21b04b8d59e4\") " Nov 22 10:58:30 crc kubenswrapper[4926]: I1122 10:58:30.932013 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/59000497-7fb0-496d-afca-21b04b8d59e4-scripts\") pod \"59000497-7fb0-496d-afca-21b04b8d59e4\" (UID: \"59000497-7fb0-496d-afca-21b04b8d59e4\") " Nov 22 10:58:30 crc kubenswrapper[4926]: I1122 10:58:30.932055 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/59000497-7fb0-496d-afca-21b04b8d59e4-config-data\") pod \"59000497-7fb0-496d-afca-21b04b8d59e4\" (UID: \"59000497-7fb0-496d-afca-21b04b8d59e4\") " Nov 22 10:58:30 crc kubenswrapper[4926]: I1122 10:58:30.932086 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/59000497-7fb0-496d-afca-21b04b8d59e4-combined-ca-bundle\") pod \"59000497-7fb0-496d-afca-21b04b8d59e4\" (UID: \"59000497-7fb0-496d-afca-21b04b8d59e4\") " Nov 22 10:58:30 crc kubenswrapper[4926]: I1122 10:58:30.932124 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"59000497-7fb0-496d-afca-21b04b8d59e4\" (UID: \"59000497-7fb0-496d-afca-21b04b8d59e4\") " Nov 22 10:58:30 crc kubenswrapper[4926]: I1122 10:58:30.932171 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rm4zg\" (UniqueName: \"kubernetes.io/projected/59000497-7fb0-496d-afca-21b04b8d59e4-kube-api-access-rm4zg\") pod \"59000497-7fb0-496d-afca-21b04b8d59e4\" (UID: \"59000497-7fb0-496d-afca-21b04b8d59e4\") " Nov 22 10:58:30 crc kubenswrapper[4926]: I1122 10:58:30.932282 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/59000497-7fb0-496d-afca-21b04b8d59e4-public-tls-certs\") pod \"59000497-7fb0-496d-afca-21b04b8d59e4\" (UID: \"59000497-7fb0-496d-afca-21b04b8d59e4\") " Nov 22 10:58:30 crc kubenswrapper[4926]: I1122 10:58:30.932373 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/59000497-7fb0-496d-afca-21b04b8d59e4-httpd-run\") pod \"59000497-7fb0-496d-afca-21b04b8d59e4\" (UID: \"59000497-7fb0-496d-afca-21b04b8d59e4\") " Nov 22 10:58:30 crc kubenswrapper[4926]: I1122 10:58:30.933771 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/59000497-7fb0-496d-afca-21b04b8d59e4-logs" (OuterVolumeSpecName: "logs") pod "59000497-7fb0-496d-afca-21b04b8d59e4" (UID: "59000497-7fb0-496d-afca-21b04b8d59e4"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 10:58:30 crc kubenswrapper[4926]: I1122 10:58:30.934205 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/59000497-7fb0-496d-afca-21b04b8d59e4-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "59000497-7fb0-496d-afca-21b04b8d59e4" (UID: "59000497-7fb0-496d-afca-21b04b8d59e4"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 10:58:30 crc kubenswrapper[4926]: I1122 10:58:30.938972 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage01-crc" (OuterVolumeSpecName: "glance") pod "59000497-7fb0-496d-afca-21b04b8d59e4" (UID: "59000497-7fb0-496d-afca-21b04b8d59e4"). InnerVolumeSpecName "local-storage01-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 22 10:58:30 crc kubenswrapper[4926]: I1122 10:58:30.939003 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/59000497-7fb0-496d-afca-21b04b8d59e4-kube-api-access-rm4zg" (OuterVolumeSpecName: "kube-api-access-rm4zg") pod "59000497-7fb0-496d-afca-21b04b8d59e4" (UID: "59000497-7fb0-496d-afca-21b04b8d59e4"). InnerVolumeSpecName "kube-api-access-rm4zg". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:58:30 crc kubenswrapper[4926]: I1122 10:58:30.940965 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/59000497-7fb0-496d-afca-21b04b8d59e4-scripts" (OuterVolumeSpecName: "scripts") pod "59000497-7fb0-496d-afca-21b04b8d59e4" (UID: "59000497-7fb0-496d-afca-21b04b8d59e4"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:58:30 crc kubenswrapper[4926]: I1122 10:58:30.976349 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/59000497-7fb0-496d-afca-21b04b8d59e4-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "59000497-7fb0-496d-afca-21b04b8d59e4" (UID: "59000497-7fb0-496d-afca-21b04b8d59e4"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:58:31 crc kubenswrapper[4926]: I1122 10:58:31.000236 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/59000497-7fb0-496d-afca-21b04b8d59e4-config-data" (OuterVolumeSpecName: "config-data") pod "59000497-7fb0-496d-afca-21b04b8d59e4" (UID: "59000497-7fb0-496d-afca-21b04b8d59e4"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:58:31 crc kubenswrapper[4926]: I1122 10:58:31.001304 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/59000497-7fb0-496d-afca-21b04b8d59e4-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "59000497-7fb0-496d-afca-21b04b8d59e4" (UID: "59000497-7fb0-496d-afca-21b04b8d59e4"). InnerVolumeSpecName "public-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:58:31 crc kubenswrapper[4926]: I1122 10:58:31.034431 4926 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/59000497-7fb0-496d-afca-21b04b8d59e4-public-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 22 10:58:31 crc kubenswrapper[4926]: I1122 10:58:31.034662 4926 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/59000497-7fb0-496d-afca-21b04b8d59e4-httpd-run\") on node \"crc\" DevicePath \"\"" Nov 22 10:58:31 crc kubenswrapper[4926]: I1122 10:58:31.034674 4926 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/59000497-7fb0-496d-afca-21b04b8d59e4-logs\") on node \"crc\" DevicePath \"\"" Nov 22 10:58:31 crc kubenswrapper[4926]: I1122 10:58:31.034684 4926 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/59000497-7fb0-496d-afca-21b04b8d59e4-scripts\") on node \"crc\" DevicePath \"\"" Nov 22 10:58:31 crc kubenswrapper[4926]: I1122 10:58:31.034693 4926 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/59000497-7fb0-496d-afca-21b04b8d59e4-config-data\") on node \"crc\" DevicePath \"\"" Nov 22 10:58:31 crc kubenswrapper[4926]: I1122 10:58:31.034702 4926 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/59000497-7fb0-496d-afca-21b04b8d59e4-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 22 10:58:31 crc kubenswrapper[4926]: I1122 10:58:31.034748 4926 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") on node \"crc\" " Nov 22 10:58:31 crc kubenswrapper[4926]: I1122 10:58:31.034768 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rm4zg\" (UniqueName: \"kubernetes.io/projected/59000497-7fb0-496d-afca-21b04b8d59e4-kube-api-access-rm4zg\") on node \"crc\" DevicePath \"\"" Nov 22 10:58:31 crc kubenswrapper[4926]: I1122 10:58:31.054233 4926 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage01-crc" (UniqueName: "kubernetes.io/local-volume/local-storage01-crc") on node "crc" Nov 22 10:58:31 crc kubenswrapper[4926]: I1122 10:58:31.136314 4926 reconciler_common.go:293] "Volume detached for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") on node \"crc\" DevicePath \"\"" Nov 22 10:58:31 crc kubenswrapper[4926]: I1122 10:58:31.144566 4926 generic.go:334] "Generic (PLEG): container finished" podID="59000497-7fb0-496d-afca-21b04b8d59e4" containerID="12f3423e52ab3cffb68485b607504cb63dc198625ad4a3feffed0d22e0439dec" exitCode=0 Nov 22 10:58:31 crc kubenswrapper[4926]: I1122 10:58:31.144619 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"59000497-7fb0-496d-afca-21b04b8d59e4","Type":"ContainerDied","Data":"12f3423e52ab3cffb68485b607504cb63dc198625ad4a3feffed0d22e0439dec"} Nov 22 10:58:31 crc kubenswrapper[4926]: I1122 10:58:31.144665 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"59000497-7fb0-496d-afca-21b04b8d59e4","Type":"ContainerDied","Data":"eca2b1377003b3f000c58f3734b9b2830d35ae6899a9a6adfe7d48b1ffc5faca"} Nov 22 10:58:31 crc kubenswrapper[4926]: I1122 
10:58:31.144685 4926 scope.go:117] "RemoveContainer" containerID="12f3423e52ab3cffb68485b607504cb63dc198625ad4a3feffed0d22e0439dec" Nov 22 10:58:31 crc kubenswrapper[4926]: I1122 10:58:31.144623 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 22 10:58:31 crc kubenswrapper[4926]: I1122 10:58:31.186988 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 22 10:58:31 crc kubenswrapper[4926]: I1122 10:58:31.193418 4926 scope.go:117] "RemoveContainer" containerID="994b59d90e33fa06996f4fe64553849145665a7fd4d10b6135643fc9d1d80af7" Nov 22 10:58:31 crc kubenswrapper[4926]: I1122 10:58:31.196000 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 22 10:58:31 crc kubenswrapper[4926]: I1122 10:58:31.215818 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-external-api-0"] Nov 22 10:58:31 crc kubenswrapper[4926]: E1122 10:58:31.216453 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="098e9322-614e-4c4a-9fca-6479e57018d7" containerName="mariadb-database-create" Nov 22 10:58:31 crc kubenswrapper[4926]: I1122 10:58:31.216473 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="098e9322-614e-4c4a-9fca-6479e57018d7" containerName="mariadb-database-create" Nov 22 10:58:31 crc kubenswrapper[4926]: E1122 10:58:31.216490 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b2c9d3cf-6437-4b11-95b0-437c2d2eed0b" containerName="mariadb-account-create-update" Nov 22 10:58:31 crc kubenswrapper[4926]: I1122 10:58:31.216497 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="b2c9d3cf-6437-4b11-95b0-437c2d2eed0b" containerName="mariadb-account-create-update" Nov 22 10:58:31 crc kubenswrapper[4926]: E1122 10:58:31.216507 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d09ba1b7-6aa2-4c58-a281-1cee30d6c27d" containerName="mariadb-database-create" Nov 22 10:58:31 crc kubenswrapper[4926]: I1122 10:58:31.216515 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="d09ba1b7-6aa2-4c58-a281-1cee30d6c27d" containerName="mariadb-database-create" Nov 22 10:58:31 crc kubenswrapper[4926]: E1122 10:58:31.216535 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0566b619-da0e-49ff-b282-3d2bb8ae4fe6" containerName="horizon-log" Nov 22 10:58:31 crc kubenswrapper[4926]: I1122 10:58:31.216545 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="0566b619-da0e-49ff-b282-3d2bb8ae4fe6" containerName="horizon-log" Nov 22 10:58:31 crc kubenswrapper[4926]: E1122 10:58:31.216563 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="219be0fb-9881-4029-8ab1-03a46157cd21" containerName="mariadb-database-create" Nov 22 10:58:31 crc kubenswrapper[4926]: I1122 10:58:31.216571 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="219be0fb-9881-4029-8ab1-03a46157cd21" containerName="mariadb-database-create" Nov 22 10:58:31 crc kubenswrapper[4926]: E1122 10:58:31.216580 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="30ad455a-8882-418d-ac1f-4ba80ba554af" containerName="mariadb-account-create-update" Nov 22 10:58:31 crc kubenswrapper[4926]: I1122 10:58:31.216588 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="30ad455a-8882-418d-ac1f-4ba80ba554af" containerName="mariadb-account-create-update" Nov 22 10:58:31 crc kubenswrapper[4926]: E1122 10:58:31.216610 4926 
cpu_manager.go:410] "RemoveStaleState: removing container" podUID="59000497-7fb0-496d-afca-21b04b8d59e4" containerName="glance-httpd" Nov 22 10:58:31 crc kubenswrapper[4926]: I1122 10:58:31.216617 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="59000497-7fb0-496d-afca-21b04b8d59e4" containerName="glance-httpd" Nov 22 10:58:31 crc kubenswrapper[4926]: E1122 10:58:31.216626 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9e45e318-c0a2-473e-8390-aaa51821b3f8" containerName="mariadb-account-create-update" Nov 22 10:58:31 crc kubenswrapper[4926]: I1122 10:58:31.216751 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="9e45e318-c0a2-473e-8390-aaa51821b3f8" containerName="mariadb-account-create-update" Nov 22 10:58:31 crc kubenswrapper[4926]: E1122 10:58:31.216769 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0566b619-da0e-49ff-b282-3d2bb8ae4fe6" containerName="horizon" Nov 22 10:58:31 crc kubenswrapper[4926]: I1122 10:58:31.216777 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="0566b619-da0e-49ff-b282-3d2bb8ae4fe6" containerName="horizon" Nov 22 10:58:31 crc kubenswrapper[4926]: E1122 10:58:31.216793 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="59000497-7fb0-496d-afca-21b04b8d59e4" containerName="glance-log" Nov 22 10:58:31 crc kubenswrapper[4926]: I1122 10:58:31.216801 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="59000497-7fb0-496d-afca-21b04b8d59e4" containerName="glance-log" Nov 22 10:58:31 crc kubenswrapper[4926]: I1122 10:58:31.217123 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="0566b619-da0e-49ff-b282-3d2bb8ae4fe6" containerName="horizon" Nov 22 10:58:31 crc kubenswrapper[4926]: I1122 10:58:31.217145 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="219be0fb-9881-4029-8ab1-03a46157cd21" containerName="mariadb-database-create" Nov 22 10:58:31 crc kubenswrapper[4926]: I1122 10:58:31.217158 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="59000497-7fb0-496d-afca-21b04b8d59e4" containerName="glance-httpd" Nov 22 10:58:31 crc kubenswrapper[4926]: I1122 10:58:31.217169 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="0566b619-da0e-49ff-b282-3d2bb8ae4fe6" containerName="horizon-log" Nov 22 10:58:31 crc kubenswrapper[4926]: I1122 10:58:31.217185 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="30ad455a-8882-418d-ac1f-4ba80ba554af" containerName="mariadb-account-create-update" Nov 22 10:58:31 crc kubenswrapper[4926]: I1122 10:58:31.217198 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="9e45e318-c0a2-473e-8390-aaa51821b3f8" containerName="mariadb-account-create-update" Nov 22 10:58:31 crc kubenswrapper[4926]: I1122 10:58:31.217206 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="098e9322-614e-4c4a-9fca-6479e57018d7" containerName="mariadb-database-create" Nov 22 10:58:31 crc kubenswrapper[4926]: I1122 10:58:31.217228 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="b2c9d3cf-6437-4b11-95b0-437c2d2eed0b" containerName="mariadb-account-create-update" Nov 22 10:58:31 crc kubenswrapper[4926]: I1122 10:58:31.217247 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="d09ba1b7-6aa2-4c58-a281-1cee30d6c27d" containerName="mariadb-database-create" Nov 22 10:58:31 crc kubenswrapper[4926]: I1122 10:58:31.217258 4926 memory_manager.go:354] "RemoveStaleState removing state" 
podUID="59000497-7fb0-496d-afca-21b04b8d59e4" containerName="glance-log" Nov 22 10:58:31 crc kubenswrapper[4926]: I1122 10:58:31.218423 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 22 10:58:31 crc kubenswrapper[4926]: I1122 10:58:31.222594 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-public-svc" Nov 22 10:58:31 crc kubenswrapper[4926]: I1122 10:58:31.222599 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-external-config-data" Nov 22 10:58:31 crc kubenswrapper[4926]: I1122 10:58:31.228741 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 22 10:58:31 crc kubenswrapper[4926]: I1122 10:58:31.259390 4926 scope.go:117] "RemoveContainer" containerID="12f3423e52ab3cffb68485b607504cb63dc198625ad4a3feffed0d22e0439dec" Nov 22 10:58:31 crc kubenswrapper[4926]: E1122 10:58:31.259877 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"12f3423e52ab3cffb68485b607504cb63dc198625ad4a3feffed0d22e0439dec\": container with ID starting with 12f3423e52ab3cffb68485b607504cb63dc198625ad4a3feffed0d22e0439dec not found: ID does not exist" containerID="12f3423e52ab3cffb68485b607504cb63dc198625ad4a3feffed0d22e0439dec" Nov 22 10:58:31 crc kubenswrapper[4926]: I1122 10:58:31.259978 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"12f3423e52ab3cffb68485b607504cb63dc198625ad4a3feffed0d22e0439dec"} err="failed to get container status \"12f3423e52ab3cffb68485b607504cb63dc198625ad4a3feffed0d22e0439dec\": rpc error: code = NotFound desc = could not find container \"12f3423e52ab3cffb68485b607504cb63dc198625ad4a3feffed0d22e0439dec\": container with ID starting with 12f3423e52ab3cffb68485b607504cb63dc198625ad4a3feffed0d22e0439dec not found: ID does not exist" Nov 22 10:58:31 crc kubenswrapper[4926]: I1122 10:58:31.260005 4926 scope.go:117] "RemoveContainer" containerID="994b59d90e33fa06996f4fe64553849145665a7fd4d10b6135643fc9d1d80af7" Nov 22 10:58:31 crc kubenswrapper[4926]: E1122 10:58:31.261689 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"994b59d90e33fa06996f4fe64553849145665a7fd4d10b6135643fc9d1d80af7\": container with ID starting with 994b59d90e33fa06996f4fe64553849145665a7fd4d10b6135643fc9d1d80af7 not found: ID does not exist" containerID="994b59d90e33fa06996f4fe64553849145665a7fd4d10b6135643fc9d1d80af7" Nov 22 10:58:31 crc kubenswrapper[4926]: I1122 10:58:31.261715 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"994b59d90e33fa06996f4fe64553849145665a7fd4d10b6135643fc9d1d80af7"} err="failed to get container status \"994b59d90e33fa06996f4fe64553849145665a7fd4d10b6135643fc9d1d80af7\": rpc error: code = NotFound desc = could not find container \"994b59d90e33fa06996f4fe64553849145665a7fd4d10b6135643fc9d1d80af7\": container with ID starting with 994b59d90e33fa06996f4fe64553849145665a7fd4d10b6135643fc9d1d80af7 not found: ID does not exist" Nov 22 10:58:31 crc kubenswrapper[4926]: I1122 10:58:31.341569 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/699ad142-80cd-4ee2-86ca-87c22cc7f39b-combined-ca-bundle\") pod 
\"glance-default-external-api-0\" (UID: \"699ad142-80cd-4ee2-86ca-87c22cc7f39b\") " pod="openstack/glance-default-external-api-0" Nov 22 10:58:31 crc kubenswrapper[4926]: I1122 10:58:31.341671 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8lxx6\" (UniqueName: \"kubernetes.io/projected/699ad142-80cd-4ee2-86ca-87c22cc7f39b-kube-api-access-8lxx6\") pod \"glance-default-external-api-0\" (UID: \"699ad142-80cd-4ee2-86ca-87c22cc7f39b\") " pod="openstack/glance-default-external-api-0" Nov 22 10:58:31 crc kubenswrapper[4926]: I1122 10:58:31.341691 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/699ad142-80cd-4ee2-86ca-87c22cc7f39b-scripts\") pod \"glance-default-external-api-0\" (UID: \"699ad142-80cd-4ee2-86ca-87c22cc7f39b\") " pod="openstack/glance-default-external-api-0" Nov 22 10:58:31 crc kubenswrapper[4926]: I1122 10:58:31.341739 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/699ad142-80cd-4ee2-86ca-87c22cc7f39b-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"699ad142-80cd-4ee2-86ca-87c22cc7f39b\") " pod="openstack/glance-default-external-api-0" Nov 22 10:58:31 crc kubenswrapper[4926]: I1122 10:58:31.341775 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/699ad142-80cd-4ee2-86ca-87c22cc7f39b-logs\") pod \"glance-default-external-api-0\" (UID: \"699ad142-80cd-4ee2-86ca-87c22cc7f39b\") " pod="openstack/glance-default-external-api-0" Nov 22 10:58:31 crc kubenswrapper[4926]: I1122 10:58:31.341800 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/699ad142-80cd-4ee2-86ca-87c22cc7f39b-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"699ad142-80cd-4ee2-86ca-87c22cc7f39b\") " pod="openstack/glance-default-external-api-0" Nov 22 10:58:31 crc kubenswrapper[4926]: I1122 10:58:31.341821 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/699ad142-80cd-4ee2-86ca-87c22cc7f39b-config-data\") pod \"glance-default-external-api-0\" (UID: \"699ad142-80cd-4ee2-86ca-87c22cc7f39b\") " pod="openstack/glance-default-external-api-0" Nov 22 10:58:31 crc kubenswrapper[4926]: I1122 10:58:31.341849 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"glance-default-external-api-0\" (UID: \"699ad142-80cd-4ee2-86ca-87c22cc7f39b\") " pod="openstack/glance-default-external-api-0" Nov 22 10:58:31 crc kubenswrapper[4926]: I1122 10:58:31.444154 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/699ad142-80cd-4ee2-86ca-87c22cc7f39b-logs\") pod \"glance-default-external-api-0\" (UID: \"699ad142-80cd-4ee2-86ca-87c22cc7f39b\") " pod="openstack/glance-default-external-api-0" Nov 22 10:58:31 crc kubenswrapper[4926]: I1122 10:58:31.444216 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: 
\"kubernetes.io/secret/699ad142-80cd-4ee2-86ca-87c22cc7f39b-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"699ad142-80cd-4ee2-86ca-87c22cc7f39b\") " pod="openstack/glance-default-external-api-0" Nov 22 10:58:31 crc kubenswrapper[4926]: I1122 10:58:31.444238 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/699ad142-80cd-4ee2-86ca-87c22cc7f39b-config-data\") pod \"glance-default-external-api-0\" (UID: \"699ad142-80cd-4ee2-86ca-87c22cc7f39b\") " pod="openstack/glance-default-external-api-0" Nov 22 10:58:31 crc kubenswrapper[4926]: I1122 10:58:31.444275 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"glance-default-external-api-0\" (UID: \"699ad142-80cd-4ee2-86ca-87c22cc7f39b\") " pod="openstack/glance-default-external-api-0" Nov 22 10:58:31 crc kubenswrapper[4926]: I1122 10:58:31.444338 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/699ad142-80cd-4ee2-86ca-87c22cc7f39b-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"699ad142-80cd-4ee2-86ca-87c22cc7f39b\") " pod="openstack/glance-default-external-api-0" Nov 22 10:58:31 crc kubenswrapper[4926]: I1122 10:58:31.444401 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8lxx6\" (UniqueName: \"kubernetes.io/projected/699ad142-80cd-4ee2-86ca-87c22cc7f39b-kube-api-access-8lxx6\") pod \"glance-default-external-api-0\" (UID: \"699ad142-80cd-4ee2-86ca-87c22cc7f39b\") " pod="openstack/glance-default-external-api-0" Nov 22 10:58:31 crc kubenswrapper[4926]: I1122 10:58:31.444419 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/699ad142-80cd-4ee2-86ca-87c22cc7f39b-scripts\") pod \"glance-default-external-api-0\" (UID: \"699ad142-80cd-4ee2-86ca-87c22cc7f39b\") " pod="openstack/glance-default-external-api-0" Nov 22 10:58:31 crc kubenswrapper[4926]: I1122 10:58:31.444458 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/699ad142-80cd-4ee2-86ca-87c22cc7f39b-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"699ad142-80cd-4ee2-86ca-87c22cc7f39b\") " pod="openstack/glance-default-external-api-0" Nov 22 10:58:31 crc kubenswrapper[4926]: I1122 10:58:31.444800 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/699ad142-80cd-4ee2-86ca-87c22cc7f39b-logs\") pod \"glance-default-external-api-0\" (UID: \"699ad142-80cd-4ee2-86ca-87c22cc7f39b\") " pod="openstack/glance-default-external-api-0" Nov 22 10:58:31 crc kubenswrapper[4926]: I1122 10:58:31.444891 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/699ad142-80cd-4ee2-86ca-87c22cc7f39b-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"699ad142-80cd-4ee2-86ca-87c22cc7f39b\") " pod="openstack/glance-default-external-api-0" Nov 22 10:58:31 crc kubenswrapper[4926]: I1122 10:58:31.444949 4926 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"glance-default-external-api-0\" (UID: 
\"699ad142-80cd-4ee2-86ca-87c22cc7f39b\") device mount path \"/mnt/openstack/pv01\"" pod="openstack/glance-default-external-api-0" Nov 22 10:58:31 crc kubenswrapper[4926]: I1122 10:58:31.449987 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/699ad142-80cd-4ee2-86ca-87c22cc7f39b-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"699ad142-80cd-4ee2-86ca-87c22cc7f39b\") " pod="openstack/glance-default-external-api-0" Nov 22 10:58:31 crc kubenswrapper[4926]: I1122 10:58:31.450103 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/699ad142-80cd-4ee2-86ca-87c22cc7f39b-scripts\") pod \"glance-default-external-api-0\" (UID: \"699ad142-80cd-4ee2-86ca-87c22cc7f39b\") " pod="openstack/glance-default-external-api-0" Nov 22 10:58:31 crc kubenswrapper[4926]: I1122 10:58:31.450600 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/699ad142-80cd-4ee2-86ca-87c22cc7f39b-config-data\") pod \"glance-default-external-api-0\" (UID: \"699ad142-80cd-4ee2-86ca-87c22cc7f39b\") " pod="openstack/glance-default-external-api-0" Nov 22 10:58:31 crc kubenswrapper[4926]: I1122 10:58:31.451418 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/699ad142-80cd-4ee2-86ca-87c22cc7f39b-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"699ad142-80cd-4ee2-86ca-87c22cc7f39b\") " pod="openstack/glance-default-external-api-0" Nov 22 10:58:31 crc kubenswrapper[4926]: I1122 10:58:31.479321 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8lxx6\" (UniqueName: \"kubernetes.io/projected/699ad142-80cd-4ee2-86ca-87c22cc7f39b-kube-api-access-8lxx6\") pod \"glance-default-external-api-0\" (UID: \"699ad142-80cd-4ee2-86ca-87c22cc7f39b\") " pod="openstack/glance-default-external-api-0" Nov 22 10:58:31 crc kubenswrapper[4926]: I1122 10:58:31.481365 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"glance-default-external-api-0\" (UID: \"699ad142-80cd-4ee2-86ca-87c22cc7f39b\") " pod="openstack/glance-default-external-api-0" Nov 22 10:58:31 crc kubenswrapper[4926]: I1122 10:58:31.561533 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 22 10:58:32 crc kubenswrapper[4926]: W1122 10:58:32.179509 4926 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod699ad142_80cd_4ee2_86ca_87c22cc7f39b.slice/crio-bb458e1882746c86587b1e5cb33c5a6421c8ef5ecb8b945f3ebabec73370fd59 WatchSource:0}: Error finding container bb458e1882746c86587b1e5cb33c5a6421c8ef5ecb8b945f3ebabec73370fd59: Status 404 returned error can't find the container with id bb458e1882746c86587b1e5cb33c5a6421c8ef5ecb8b945f3ebabec73370fd59 Nov 22 10:58:32 crc kubenswrapper[4926]: I1122 10:58:32.180206 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 22 10:58:32 crc kubenswrapper[4926]: I1122 10:58:32.611690 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="59000497-7fb0-496d-afca-21b04b8d59e4" path="/var/lib/kubelet/pods/59000497-7fb0-496d-afca-21b04b8d59e4/volumes" Nov 22 10:58:32 crc kubenswrapper[4926]: I1122 10:58:32.826718 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 22 10:58:32 crc kubenswrapper[4926]: I1122 10:58:32.980369 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/32257092-e014-4913-99e3-a92b522301e2-config-data\") pod \"32257092-e014-4913-99e3-a92b522301e2\" (UID: \"32257092-e014-4913-99e3-a92b522301e2\") " Nov 22 10:58:32 crc kubenswrapper[4926]: I1122 10:58:32.980477 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qp4hb\" (UniqueName: \"kubernetes.io/projected/32257092-e014-4913-99e3-a92b522301e2-kube-api-access-qp4hb\") pod \"32257092-e014-4913-99e3-a92b522301e2\" (UID: \"32257092-e014-4913-99e3-a92b522301e2\") " Nov 22 10:58:32 crc kubenswrapper[4926]: I1122 10:58:32.980522 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/32257092-e014-4913-99e3-a92b522301e2-logs\") pod \"32257092-e014-4913-99e3-a92b522301e2\" (UID: \"32257092-e014-4913-99e3-a92b522301e2\") " Nov 22 10:58:32 crc kubenswrapper[4926]: I1122 10:58:32.980571 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"32257092-e014-4913-99e3-a92b522301e2\" (UID: \"32257092-e014-4913-99e3-a92b522301e2\") " Nov 22 10:58:32 crc kubenswrapper[4926]: I1122 10:58:32.980596 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/32257092-e014-4913-99e3-a92b522301e2-internal-tls-certs\") pod \"32257092-e014-4913-99e3-a92b522301e2\" (UID: \"32257092-e014-4913-99e3-a92b522301e2\") " Nov 22 10:58:32 crc kubenswrapper[4926]: I1122 10:58:32.980680 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/32257092-e014-4913-99e3-a92b522301e2-httpd-run\") pod \"32257092-e014-4913-99e3-a92b522301e2\" (UID: \"32257092-e014-4913-99e3-a92b522301e2\") " Nov 22 10:58:32 crc kubenswrapper[4926]: I1122 10:58:32.980710 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/32257092-e014-4913-99e3-a92b522301e2-scripts\") pod 
\"32257092-e014-4913-99e3-a92b522301e2\" (UID: \"32257092-e014-4913-99e3-a92b522301e2\") " Nov 22 10:58:32 crc kubenswrapper[4926]: I1122 10:58:32.980773 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/32257092-e014-4913-99e3-a92b522301e2-combined-ca-bundle\") pod \"32257092-e014-4913-99e3-a92b522301e2\" (UID: \"32257092-e014-4913-99e3-a92b522301e2\") " Nov 22 10:58:32 crc kubenswrapper[4926]: I1122 10:58:32.990985 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/32257092-e014-4913-99e3-a92b522301e2-logs" (OuterVolumeSpecName: "logs") pod "32257092-e014-4913-99e3-a92b522301e2" (UID: "32257092-e014-4913-99e3-a92b522301e2"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 10:58:32 crc kubenswrapper[4926]: I1122 10:58:32.991385 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/32257092-e014-4913-99e3-a92b522301e2-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "32257092-e014-4913-99e3-a92b522301e2" (UID: "32257092-e014-4913-99e3-a92b522301e2"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 10:58:32 crc kubenswrapper[4926]: I1122 10:58:32.995051 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/32257092-e014-4913-99e3-a92b522301e2-kube-api-access-qp4hb" (OuterVolumeSpecName: "kube-api-access-qp4hb") pod "32257092-e014-4913-99e3-a92b522301e2" (UID: "32257092-e014-4913-99e3-a92b522301e2"). InnerVolumeSpecName "kube-api-access-qp4hb". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:58:33 crc kubenswrapper[4926]: I1122 10:58:33.001966 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage04-crc" (OuterVolumeSpecName: "glance") pod "32257092-e014-4913-99e3-a92b522301e2" (UID: "32257092-e014-4913-99e3-a92b522301e2"). InnerVolumeSpecName "local-storage04-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 22 10:58:33 crc kubenswrapper[4926]: I1122 10:58:33.005794 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/32257092-e014-4913-99e3-a92b522301e2-scripts" (OuterVolumeSpecName: "scripts") pod "32257092-e014-4913-99e3-a92b522301e2" (UID: "32257092-e014-4913-99e3-a92b522301e2"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:58:33 crc kubenswrapper[4926]: I1122 10:58:33.057591 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-conductor-db-sync-8kkgp"] Nov 22 10:58:33 crc kubenswrapper[4926]: E1122 10:58:33.058301 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="32257092-e014-4913-99e3-a92b522301e2" containerName="glance-log" Nov 22 10:58:33 crc kubenswrapper[4926]: I1122 10:58:33.058397 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="32257092-e014-4913-99e3-a92b522301e2" containerName="glance-log" Nov 22 10:58:33 crc kubenswrapper[4926]: E1122 10:58:33.058580 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="32257092-e014-4913-99e3-a92b522301e2" containerName="glance-httpd" Nov 22 10:58:33 crc kubenswrapper[4926]: I1122 10:58:33.058640 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="32257092-e014-4913-99e3-a92b522301e2" containerName="glance-httpd" Nov 22 10:58:33 crc kubenswrapper[4926]: I1122 10:58:33.059019 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="32257092-e014-4913-99e3-a92b522301e2" containerName="glance-log" Nov 22 10:58:33 crc kubenswrapper[4926]: I1122 10:58:33.059099 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="32257092-e014-4913-99e3-a92b522301e2" containerName="glance-httpd" Nov 22 10:58:33 crc kubenswrapper[4926]: I1122 10:58:33.060092 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-8kkgp" Nov 22 10:58:33 crc kubenswrapper[4926]: I1122 10:58:33.063186 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-scripts" Nov 22 10:58:33 crc kubenswrapper[4926]: I1122 10:58:33.063390 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-config-data" Nov 22 10:58:33 crc kubenswrapper[4926]: I1122 10:58:33.063716 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-nova-dockercfg-4ccpb" Nov 22 10:58:33 crc kubenswrapper[4926]: I1122 10:58:33.064693 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/32257092-e014-4913-99e3-a92b522301e2-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "32257092-e014-4913-99e3-a92b522301e2" (UID: "32257092-e014-4913-99e3-a92b522301e2"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:58:33 crc kubenswrapper[4926]: I1122 10:58:33.074647 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-8kkgp"] Nov 22 10:58:33 crc kubenswrapper[4926]: I1122 10:58:33.084823 4926 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/32257092-e014-4913-99e3-a92b522301e2-httpd-run\") on node \"crc\" DevicePath \"\"" Nov 22 10:58:33 crc kubenswrapper[4926]: I1122 10:58:33.084871 4926 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/32257092-e014-4913-99e3-a92b522301e2-scripts\") on node \"crc\" DevicePath \"\"" Nov 22 10:58:33 crc kubenswrapper[4926]: I1122 10:58:33.084884 4926 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/32257092-e014-4913-99e3-a92b522301e2-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 22 10:58:33 crc kubenswrapper[4926]: I1122 10:58:33.084905 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qp4hb\" (UniqueName: \"kubernetes.io/projected/32257092-e014-4913-99e3-a92b522301e2-kube-api-access-qp4hb\") on node \"crc\" DevicePath \"\"" Nov 22 10:58:33 crc kubenswrapper[4926]: I1122 10:58:33.084915 4926 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/32257092-e014-4913-99e3-a92b522301e2-logs\") on node \"crc\" DevicePath \"\"" Nov 22 10:58:33 crc kubenswrapper[4926]: I1122 10:58:33.084944 4926 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") on node \"crc\" " Nov 22 10:58:33 crc kubenswrapper[4926]: I1122 10:58:33.112608 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/32257092-e014-4913-99e3-a92b522301e2-config-data" (OuterVolumeSpecName: "config-data") pod "32257092-e014-4913-99e3-a92b522301e2" (UID: "32257092-e014-4913-99e3-a92b522301e2"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:58:33 crc kubenswrapper[4926]: I1122 10:58:33.140933 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/32257092-e014-4913-99e3-a92b522301e2-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "32257092-e014-4913-99e3-a92b522301e2" (UID: "32257092-e014-4913-99e3-a92b522301e2"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:58:33 crc kubenswrapper[4926]: I1122 10:58:33.141582 4926 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 22 10:58:33 crc kubenswrapper[4926]: I1122 10:58:33.152967 4926 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage04-crc" (UniqueName: "kubernetes.io/local-volume/local-storage04-crc") on node "crc" Nov 22 10:58:33 crc kubenswrapper[4926]: I1122 10:58:33.188783 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6d43169d-e199-4fe3-85d7-c39acd736eb6-config-data\") pod \"nova-cell0-conductor-db-sync-8kkgp\" (UID: \"6d43169d-e199-4fe3-85d7-c39acd736eb6\") " pod="openstack/nova-cell0-conductor-db-sync-8kkgp" Nov 22 10:58:33 crc kubenswrapper[4926]: I1122 10:58:33.188829 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6d43169d-e199-4fe3-85d7-c39acd736eb6-combined-ca-bundle\") pod \"nova-cell0-conductor-db-sync-8kkgp\" (UID: \"6d43169d-e199-4fe3-85d7-c39acd736eb6\") " pod="openstack/nova-cell0-conductor-db-sync-8kkgp" Nov 22 10:58:33 crc kubenswrapper[4926]: I1122 10:58:33.189030 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fxqfb\" (UniqueName: \"kubernetes.io/projected/6d43169d-e199-4fe3-85d7-c39acd736eb6-kube-api-access-fxqfb\") pod \"nova-cell0-conductor-db-sync-8kkgp\" (UID: \"6d43169d-e199-4fe3-85d7-c39acd736eb6\") " pod="openstack/nova-cell0-conductor-db-sync-8kkgp" Nov 22 10:58:33 crc kubenswrapper[4926]: I1122 10:58:33.189136 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6d43169d-e199-4fe3-85d7-c39acd736eb6-scripts\") pod \"nova-cell0-conductor-db-sync-8kkgp\" (UID: \"6d43169d-e199-4fe3-85d7-c39acd736eb6\") " pod="openstack/nova-cell0-conductor-db-sync-8kkgp" Nov 22 10:58:33 crc kubenswrapper[4926]: I1122 10:58:33.189864 4926 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/32257092-e014-4913-99e3-a92b522301e2-config-data\") on node \"crc\" DevicePath \"\"" Nov 22 10:58:33 crc kubenswrapper[4926]: I1122 10:58:33.189922 4926 reconciler_common.go:293] "Volume detached for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") on node \"crc\" DevicePath \"\"" Nov 22 10:58:33 crc kubenswrapper[4926]: I1122 10:58:33.189939 4926 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/32257092-e014-4913-99e3-a92b522301e2-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 22 10:58:33 crc kubenswrapper[4926]: I1122 10:58:33.203448 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"699ad142-80cd-4ee2-86ca-87c22cc7f39b","Type":"ContainerStarted","Data":"b33e9cd7f1eb5823551783d217056f00adba4d4e31a56ee902737d0365fd021f"} Nov 22 10:58:33 crc kubenswrapper[4926]: I1122 10:58:33.203494 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"699ad142-80cd-4ee2-86ca-87c22cc7f39b","Type":"ContainerStarted","Data":"bb458e1882746c86587b1e5cb33c5a6421c8ef5ecb8b945f3ebabec73370fd59"} Nov 22 10:58:33 crc kubenswrapper[4926]: I1122 10:58:33.224002 4926 generic.go:334] "Generic (PLEG): container finished" podID="cf1674fb-d82f-4bb2-9066-b51d000d311b" 
containerID="b45cfb2cded6a8408a46525219be7da1bac3d0473e8ce848b0150bb98f209d98" exitCode=0 Nov 22 10:58:33 crc kubenswrapper[4926]: I1122 10:58:33.224123 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"cf1674fb-d82f-4bb2-9066-b51d000d311b","Type":"ContainerDied","Data":"b45cfb2cded6a8408a46525219be7da1bac3d0473e8ce848b0150bb98f209d98"} Nov 22 10:58:33 crc kubenswrapper[4926]: I1122 10:58:33.224160 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"cf1674fb-d82f-4bb2-9066-b51d000d311b","Type":"ContainerDied","Data":"3dc9eee4a6fbe723db0dfda96d4d801af4a4f7121e942cd48ea1b9429e48d6c3"} Nov 22 10:58:33 crc kubenswrapper[4926]: I1122 10:58:33.224182 4926 scope.go:117] "RemoveContainer" containerID="e0bf94b2b8eff6634f4c6fd311d65d980d13a23f91463a4ece1e8b76a166cb69" Nov 22 10:58:33 crc kubenswrapper[4926]: I1122 10:58:33.224130 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 22 10:58:33 crc kubenswrapper[4926]: I1122 10:58:33.230459 4926 generic.go:334] "Generic (PLEG): container finished" podID="32257092-e014-4913-99e3-a92b522301e2" containerID="4ddaf34f404012355ae0eee3d7d70a108e9c3a361e079136abca04e3fe810195" exitCode=0 Nov 22 10:58:33 crc kubenswrapper[4926]: I1122 10:58:33.230500 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"32257092-e014-4913-99e3-a92b522301e2","Type":"ContainerDied","Data":"4ddaf34f404012355ae0eee3d7d70a108e9c3a361e079136abca04e3fe810195"} Nov 22 10:58:33 crc kubenswrapper[4926]: I1122 10:58:33.230522 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"32257092-e014-4913-99e3-a92b522301e2","Type":"ContainerDied","Data":"8df5587a64edbe675818ae6b7dd0a47a2af4402623835fe32163ae7705ed6021"} Nov 22 10:58:33 crc kubenswrapper[4926]: I1122 10:58:33.230570 4926 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 22 10:58:33 crc kubenswrapper[4926]: I1122 10:58:33.252144 4926 scope.go:117] "RemoveContainer" containerID="3499d28b9a03e6e9eb833a25c129e5b769dcef0dc4c668810df0df936ca4ef2a" Nov 22 10:58:33 crc kubenswrapper[4926]: I1122 10:58:33.274638 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 22 10:58:33 crc kubenswrapper[4926]: I1122 10:58:33.291143 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/cf1674fb-d82f-4bb2-9066-b51d000d311b-sg-core-conf-yaml\") pod \"cf1674fb-d82f-4bb2-9066-b51d000d311b\" (UID: \"cf1674fb-d82f-4bb2-9066-b51d000d311b\") " Nov 22 10:58:33 crc kubenswrapper[4926]: I1122 10:58:33.291193 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cf1674fb-d82f-4bb2-9066-b51d000d311b-combined-ca-bundle\") pod \"cf1674fb-d82f-4bb2-9066-b51d000d311b\" (UID: \"cf1674fb-d82f-4bb2-9066-b51d000d311b\") " Nov 22 10:58:33 crc kubenswrapper[4926]: I1122 10:58:33.291232 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/cf1674fb-d82f-4bb2-9066-b51d000d311b-scripts\") pod \"cf1674fb-d82f-4bb2-9066-b51d000d311b\" (UID: \"cf1674fb-d82f-4bb2-9066-b51d000d311b\") " Nov 22 10:58:33 crc kubenswrapper[4926]: I1122 10:58:33.291283 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vssjc\" (UniqueName: \"kubernetes.io/projected/cf1674fb-d82f-4bb2-9066-b51d000d311b-kube-api-access-vssjc\") pod \"cf1674fb-d82f-4bb2-9066-b51d000d311b\" (UID: \"cf1674fb-d82f-4bb2-9066-b51d000d311b\") " Nov 22 10:58:33 crc kubenswrapper[4926]: I1122 10:58:33.291329 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cf1674fb-d82f-4bb2-9066-b51d000d311b-config-data\") pod \"cf1674fb-d82f-4bb2-9066-b51d000d311b\" (UID: \"cf1674fb-d82f-4bb2-9066-b51d000d311b\") " Nov 22 10:58:33 crc kubenswrapper[4926]: I1122 10:58:33.291395 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/cf1674fb-d82f-4bb2-9066-b51d000d311b-run-httpd\") pod \"cf1674fb-d82f-4bb2-9066-b51d000d311b\" (UID: \"cf1674fb-d82f-4bb2-9066-b51d000d311b\") " Nov 22 10:58:33 crc kubenswrapper[4926]: I1122 10:58:33.291460 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/cf1674fb-d82f-4bb2-9066-b51d000d311b-log-httpd\") pod \"cf1674fb-d82f-4bb2-9066-b51d000d311b\" (UID: \"cf1674fb-d82f-4bb2-9066-b51d000d311b\") " Nov 22 10:58:33 crc kubenswrapper[4926]: I1122 10:58:33.291768 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6d43169d-e199-4fe3-85d7-c39acd736eb6-config-data\") pod \"nova-cell0-conductor-db-sync-8kkgp\" (UID: \"6d43169d-e199-4fe3-85d7-c39acd736eb6\") " pod="openstack/nova-cell0-conductor-db-sync-8kkgp" Nov 22 10:58:33 crc kubenswrapper[4926]: I1122 10:58:33.291815 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6d43169d-e199-4fe3-85d7-c39acd736eb6-combined-ca-bundle\") pod 
\"nova-cell0-conductor-db-sync-8kkgp\" (UID: \"6d43169d-e199-4fe3-85d7-c39acd736eb6\") " pod="openstack/nova-cell0-conductor-db-sync-8kkgp" Nov 22 10:58:33 crc kubenswrapper[4926]: I1122 10:58:33.291941 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fxqfb\" (UniqueName: \"kubernetes.io/projected/6d43169d-e199-4fe3-85d7-c39acd736eb6-kube-api-access-fxqfb\") pod \"nova-cell0-conductor-db-sync-8kkgp\" (UID: \"6d43169d-e199-4fe3-85d7-c39acd736eb6\") " pod="openstack/nova-cell0-conductor-db-sync-8kkgp" Nov 22 10:58:33 crc kubenswrapper[4926]: I1122 10:58:33.291985 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6d43169d-e199-4fe3-85d7-c39acd736eb6-scripts\") pod \"nova-cell0-conductor-db-sync-8kkgp\" (UID: \"6d43169d-e199-4fe3-85d7-c39acd736eb6\") " pod="openstack/nova-cell0-conductor-db-sync-8kkgp" Nov 22 10:58:33 crc kubenswrapper[4926]: I1122 10:58:33.292707 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/cf1674fb-d82f-4bb2-9066-b51d000d311b-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "cf1674fb-d82f-4bb2-9066-b51d000d311b" (UID: "cf1674fb-d82f-4bb2-9066-b51d000d311b"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 10:58:33 crc kubenswrapper[4926]: I1122 10:58:33.296150 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cf1674fb-d82f-4bb2-9066-b51d000d311b-kube-api-access-vssjc" (OuterVolumeSpecName: "kube-api-access-vssjc") pod "cf1674fb-d82f-4bb2-9066-b51d000d311b" (UID: "cf1674fb-d82f-4bb2-9066-b51d000d311b"). InnerVolumeSpecName "kube-api-access-vssjc". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:58:33 crc kubenswrapper[4926]: I1122 10:58:33.299613 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cf1674fb-d82f-4bb2-9066-b51d000d311b-scripts" (OuterVolumeSpecName: "scripts") pod "cf1674fb-d82f-4bb2-9066-b51d000d311b" (UID: "cf1674fb-d82f-4bb2-9066-b51d000d311b"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:58:33 crc kubenswrapper[4926]: I1122 10:58:33.300479 4926 scope.go:117] "RemoveContainer" containerID="dfcb66f769575d6206a3db58410926f980a5356d820fc5dcbed5a0cee12978f8" Nov 22 10:58:33 crc kubenswrapper[4926]: I1122 10:58:33.301035 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/cf1674fb-d82f-4bb2-9066-b51d000d311b-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "cf1674fb-d82f-4bb2-9066-b51d000d311b" (UID: "cf1674fb-d82f-4bb2-9066-b51d000d311b"). InnerVolumeSpecName "run-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 10:58:33 crc kubenswrapper[4926]: I1122 10:58:33.301332 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 22 10:58:33 crc kubenswrapper[4926]: I1122 10:58:33.303850 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6d43169d-e199-4fe3-85d7-c39acd736eb6-scripts\") pod \"nova-cell0-conductor-db-sync-8kkgp\" (UID: \"6d43169d-e199-4fe3-85d7-c39acd736eb6\") " pod="openstack/nova-cell0-conductor-db-sync-8kkgp" Nov 22 10:58:33 crc kubenswrapper[4926]: I1122 10:58:33.313023 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6d43169d-e199-4fe3-85d7-c39acd736eb6-combined-ca-bundle\") pod \"nova-cell0-conductor-db-sync-8kkgp\" (UID: \"6d43169d-e199-4fe3-85d7-c39acd736eb6\") " pod="openstack/nova-cell0-conductor-db-sync-8kkgp" Nov 22 10:58:33 crc kubenswrapper[4926]: I1122 10:58:33.318677 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6d43169d-e199-4fe3-85d7-c39acd736eb6-config-data\") pod \"nova-cell0-conductor-db-sync-8kkgp\" (UID: \"6d43169d-e199-4fe3-85d7-c39acd736eb6\") " pod="openstack/nova-cell0-conductor-db-sync-8kkgp" Nov 22 10:58:33 crc kubenswrapper[4926]: I1122 10:58:33.333147 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 22 10:58:33 crc kubenswrapper[4926]: E1122 10:58:33.333672 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cf1674fb-d82f-4bb2-9066-b51d000d311b" containerName="sg-core" Nov 22 10:58:33 crc kubenswrapper[4926]: I1122 10:58:33.333696 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="cf1674fb-d82f-4bb2-9066-b51d000d311b" containerName="sg-core" Nov 22 10:58:33 crc kubenswrapper[4926]: E1122 10:58:33.333728 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cf1674fb-d82f-4bb2-9066-b51d000d311b" containerName="proxy-httpd" Nov 22 10:58:33 crc kubenswrapper[4926]: I1122 10:58:33.333736 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="cf1674fb-d82f-4bb2-9066-b51d000d311b" containerName="proxy-httpd" Nov 22 10:58:33 crc kubenswrapper[4926]: E1122 10:58:33.333747 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cf1674fb-d82f-4bb2-9066-b51d000d311b" containerName="ceilometer-central-agent" Nov 22 10:58:33 crc kubenswrapper[4926]: I1122 10:58:33.333755 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="cf1674fb-d82f-4bb2-9066-b51d000d311b" containerName="ceilometer-central-agent" Nov 22 10:58:33 crc kubenswrapper[4926]: E1122 10:58:33.333775 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cf1674fb-d82f-4bb2-9066-b51d000d311b" containerName="ceilometer-notification-agent" Nov 22 10:58:33 crc kubenswrapper[4926]: I1122 10:58:33.333784 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="cf1674fb-d82f-4bb2-9066-b51d000d311b" containerName="ceilometer-notification-agent" Nov 22 10:58:33 crc kubenswrapper[4926]: I1122 10:58:33.333998 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="cf1674fb-d82f-4bb2-9066-b51d000d311b" containerName="proxy-httpd" Nov 22 10:58:33 crc kubenswrapper[4926]: I1122 10:58:33.334025 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="cf1674fb-d82f-4bb2-9066-b51d000d311b" 
containerName="ceilometer-central-agent" Nov 22 10:58:33 crc kubenswrapper[4926]: I1122 10:58:33.334037 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="cf1674fb-d82f-4bb2-9066-b51d000d311b" containerName="ceilometer-notification-agent" Nov 22 10:58:33 crc kubenswrapper[4926]: I1122 10:58:33.334058 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="cf1674fb-d82f-4bb2-9066-b51d000d311b" containerName="sg-core" Nov 22 10:58:33 crc kubenswrapper[4926]: I1122 10:58:33.335364 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 22 10:58:33 crc kubenswrapper[4926]: I1122 10:58:33.344473 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-internal-svc" Nov 22 10:58:33 crc kubenswrapper[4926]: I1122 10:58:33.344626 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fxqfb\" (UniqueName: \"kubernetes.io/projected/6d43169d-e199-4fe3-85d7-c39acd736eb6-kube-api-access-fxqfb\") pod \"nova-cell0-conductor-db-sync-8kkgp\" (UID: \"6d43169d-e199-4fe3-85d7-c39acd736eb6\") " pod="openstack/nova-cell0-conductor-db-sync-8kkgp" Nov 22 10:58:33 crc kubenswrapper[4926]: I1122 10:58:33.344767 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-internal-config-data" Nov 22 10:58:33 crc kubenswrapper[4926]: I1122 10:58:33.358440 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 22 10:58:33 crc kubenswrapper[4926]: I1122 10:58:33.393786 4926 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/cf1674fb-d82f-4bb2-9066-b51d000d311b-run-httpd\") on node \"crc\" DevicePath \"\"" Nov 22 10:58:33 crc kubenswrapper[4926]: I1122 10:58:33.393821 4926 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/cf1674fb-d82f-4bb2-9066-b51d000d311b-log-httpd\") on node \"crc\" DevicePath \"\"" Nov 22 10:58:33 crc kubenswrapper[4926]: I1122 10:58:33.393832 4926 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/cf1674fb-d82f-4bb2-9066-b51d000d311b-scripts\") on node \"crc\" DevicePath \"\"" Nov 22 10:58:33 crc kubenswrapper[4926]: I1122 10:58:33.393843 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vssjc\" (UniqueName: \"kubernetes.io/projected/cf1674fb-d82f-4bb2-9066-b51d000d311b-kube-api-access-vssjc\") on node \"crc\" DevicePath \"\"" Nov 22 10:58:33 crc kubenswrapper[4926]: I1122 10:58:33.416989 4926 scope.go:117] "RemoveContainer" containerID="b45cfb2cded6a8408a46525219be7da1bac3d0473e8ce848b0150bb98f209d98" Nov 22 10:58:33 crc kubenswrapper[4926]: I1122 10:58:33.424570 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cf1674fb-d82f-4bb2-9066-b51d000d311b-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "cf1674fb-d82f-4bb2-9066-b51d000d311b" (UID: "cf1674fb-d82f-4bb2-9066-b51d000d311b"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:58:33 crc kubenswrapper[4926]: I1122 10:58:33.459742 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-8kkgp" Nov 22 10:58:33 crc kubenswrapper[4926]: I1122 10:58:33.481203 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cf1674fb-d82f-4bb2-9066-b51d000d311b-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "cf1674fb-d82f-4bb2-9066-b51d000d311b" (UID: "cf1674fb-d82f-4bb2-9066-b51d000d311b"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:58:33 crc kubenswrapper[4926]: I1122 10:58:33.496371 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/eaf541eb-314b-4f78-bdcc-66f5b43b0ed5-config-data\") pod \"glance-default-internal-api-0\" (UID: \"eaf541eb-314b-4f78-bdcc-66f5b43b0ed5\") " pod="openstack/glance-default-internal-api-0" Nov 22 10:58:33 crc kubenswrapper[4926]: I1122 10:58:33.496413 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/eaf541eb-314b-4f78-bdcc-66f5b43b0ed5-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"eaf541eb-314b-4f78-bdcc-66f5b43b0ed5\") " pod="openstack/glance-default-internal-api-0" Nov 22 10:58:33 crc kubenswrapper[4926]: I1122 10:58:33.496460 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vbwrx\" (UniqueName: \"kubernetes.io/projected/eaf541eb-314b-4f78-bdcc-66f5b43b0ed5-kube-api-access-vbwrx\") pod \"glance-default-internal-api-0\" (UID: \"eaf541eb-314b-4f78-bdcc-66f5b43b0ed5\") " pod="openstack/glance-default-internal-api-0" Nov 22 10:58:33 crc kubenswrapper[4926]: I1122 10:58:33.496797 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/eaf541eb-314b-4f78-bdcc-66f5b43b0ed5-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"eaf541eb-314b-4f78-bdcc-66f5b43b0ed5\") " pod="openstack/glance-default-internal-api-0" Nov 22 10:58:33 crc kubenswrapper[4926]: I1122 10:58:33.496907 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/eaf541eb-314b-4f78-bdcc-66f5b43b0ed5-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"eaf541eb-314b-4f78-bdcc-66f5b43b0ed5\") " pod="openstack/glance-default-internal-api-0" Nov 22 10:58:33 crc kubenswrapper[4926]: I1122 10:58:33.496950 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/eaf541eb-314b-4f78-bdcc-66f5b43b0ed5-scripts\") pod \"glance-default-internal-api-0\" (UID: \"eaf541eb-314b-4f78-bdcc-66f5b43b0ed5\") " pod="openstack/glance-default-internal-api-0" Nov 22 10:58:33 crc kubenswrapper[4926]: I1122 10:58:33.497008 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/eaf541eb-314b-4f78-bdcc-66f5b43b0ed5-logs\") pod \"glance-default-internal-api-0\" (UID: \"eaf541eb-314b-4f78-bdcc-66f5b43b0ed5\") " pod="openstack/glance-default-internal-api-0" Nov 22 10:58:33 crc kubenswrapper[4926]: I1122 10:58:33.497113 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage04-crc\" 
(UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"glance-default-internal-api-0\" (UID: \"eaf541eb-314b-4f78-bdcc-66f5b43b0ed5\") " pod="openstack/glance-default-internal-api-0" Nov 22 10:58:33 crc kubenswrapper[4926]: I1122 10:58:33.497203 4926 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/cf1674fb-d82f-4bb2-9066-b51d000d311b-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Nov 22 10:58:33 crc kubenswrapper[4926]: I1122 10:58:33.497221 4926 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cf1674fb-d82f-4bb2-9066-b51d000d311b-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 22 10:58:33 crc kubenswrapper[4926]: I1122 10:58:33.507318 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cf1674fb-d82f-4bb2-9066-b51d000d311b-config-data" (OuterVolumeSpecName: "config-data") pod "cf1674fb-d82f-4bb2-9066-b51d000d311b" (UID: "cf1674fb-d82f-4bb2-9066-b51d000d311b"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:58:33 crc kubenswrapper[4926]: I1122 10:58:33.599490 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"glance-default-internal-api-0\" (UID: \"eaf541eb-314b-4f78-bdcc-66f5b43b0ed5\") " pod="openstack/glance-default-internal-api-0" Nov 22 10:58:33 crc kubenswrapper[4926]: I1122 10:58:33.599547 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/eaf541eb-314b-4f78-bdcc-66f5b43b0ed5-config-data\") pod \"glance-default-internal-api-0\" (UID: \"eaf541eb-314b-4f78-bdcc-66f5b43b0ed5\") " pod="openstack/glance-default-internal-api-0" Nov 22 10:58:33 crc kubenswrapper[4926]: I1122 10:58:33.599568 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/eaf541eb-314b-4f78-bdcc-66f5b43b0ed5-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"eaf541eb-314b-4f78-bdcc-66f5b43b0ed5\") " pod="openstack/glance-default-internal-api-0" Nov 22 10:58:33 crc kubenswrapper[4926]: I1122 10:58:33.599605 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vbwrx\" (UniqueName: \"kubernetes.io/projected/eaf541eb-314b-4f78-bdcc-66f5b43b0ed5-kube-api-access-vbwrx\") pod \"glance-default-internal-api-0\" (UID: \"eaf541eb-314b-4f78-bdcc-66f5b43b0ed5\") " pod="openstack/glance-default-internal-api-0" Nov 22 10:58:33 crc kubenswrapper[4926]: I1122 10:58:33.599671 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/eaf541eb-314b-4f78-bdcc-66f5b43b0ed5-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"eaf541eb-314b-4f78-bdcc-66f5b43b0ed5\") " pod="openstack/glance-default-internal-api-0" Nov 22 10:58:33 crc kubenswrapper[4926]: I1122 10:58:33.599700 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/eaf541eb-314b-4f78-bdcc-66f5b43b0ed5-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"eaf541eb-314b-4f78-bdcc-66f5b43b0ed5\") " pod="openstack/glance-default-internal-api-0" Nov 22 10:58:33 crc kubenswrapper[4926]: I1122 
10:58:33.599717 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/eaf541eb-314b-4f78-bdcc-66f5b43b0ed5-scripts\") pod \"glance-default-internal-api-0\" (UID: \"eaf541eb-314b-4f78-bdcc-66f5b43b0ed5\") " pod="openstack/glance-default-internal-api-0" Nov 22 10:58:33 crc kubenswrapper[4926]: I1122 10:58:33.599743 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/eaf541eb-314b-4f78-bdcc-66f5b43b0ed5-logs\") pod \"glance-default-internal-api-0\" (UID: \"eaf541eb-314b-4f78-bdcc-66f5b43b0ed5\") " pod="openstack/glance-default-internal-api-0" Nov 22 10:58:33 crc kubenswrapper[4926]: I1122 10:58:33.599799 4926 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cf1674fb-d82f-4bb2-9066-b51d000d311b-config-data\") on node \"crc\" DevicePath \"\"" Nov 22 10:58:33 crc kubenswrapper[4926]: I1122 10:58:33.600277 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/eaf541eb-314b-4f78-bdcc-66f5b43b0ed5-logs\") pod \"glance-default-internal-api-0\" (UID: \"eaf541eb-314b-4f78-bdcc-66f5b43b0ed5\") " pod="openstack/glance-default-internal-api-0" Nov 22 10:58:33 crc kubenswrapper[4926]: I1122 10:58:33.601220 4926 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"glance-default-internal-api-0\" (UID: \"eaf541eb-314b-4f78-bdcc-66f5b43b0ed5\") device mount path \"/mnt/openstack/pv04\"" pod="openstack/glance-default-internal-api-0" Nov 22 10:58:33 crc kubenswrapper[4926]: I1122 10:58:33.607347 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/eaf541eb-314b-4f78-bdcc-66f5b43b0ed5-config-data\") pod \"glance-default-internal-api-0\" (UID: \"eaf541eb-314b-4f78-bdcc-66f5b43b0ed5\") " pod="openstack/glance-default-internal-api-0" Nov 22 10:58:33 crc kubenswrapper[4926]: I1122 10:58:33.607594 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/eaf541eb-314b-4f78-bdcc-66f5b43b0ed5-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"eaf541eb-314b-4f78-bdcc-66f5b43b0ed5\") " pod="openstack/glance-default-internal-api-0" Nov 22 10:58:33 crc kubenswrapper[4926]: I1122 10:58:33.608446 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/eaf541eb-314b-4f78-bdcc-66f5b43b0ed5-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"eaf541eb-314b-4f78-bdcc-66f5b43b0ed5\") " pod="openstack/glance-default-internal-api-0" Nov 22 10:58:33 crc kubenswrapper[4926]: I1122 10:58:33.609847 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/eaf541eb-314b-4f78-bdcc-66f5b43b0ed5-scripts\") pod \"glance-default-internal-api-0\" (UID: \"eaf541eb-314b-4f78-bdcc-66f5b43b0ed5\") " pod="openstack/glance-default-internal-api-0" Nov 22 10:58:33 crc kubenswrapper[4926]: I1122 10:58:33.627350 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/eaf541eb-314b-4f78-bdcc-66f5b43b0ed5-httpd-run\") pod \"glance-default-internal-api-0\" (UID: 
\"eaf541eb-314b-4f78-bdcc-66f5b43b0ed5\") " pod="openstack/glance-default-internal-api-0" Nov 22 10:58:33 crc kubenswrapper[4926]: I1122 10:58:33.629649 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 22 10:58:33 crc kubenswrapper[4926]: I1122 10:58:33.632457 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vbwrx\" (UniqueName: \"kubernetes.io/projected/eaf541eb-314b-4f78-bdcc-66f5b43b0ed5-kube-api-access-vbwrx\") pod \"glance-default-internal-api-0\" (UID: \"eaf541eb-314b-4f78-bdcc-66f5b43b0ed5\") " pod="openstack/glance-default-internal-api-0" Nov 22 10:58:33 crc kubenswrapper[4926]: I1122 10:58:33.652150 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Nov 22 10:58:33 crc kubenswrapper[4926]: I1122 10:58:33.655950 4926 scope.go:117] "RemoveContainer" containerID="e0bf94b2b8eff6634f4c6fd311d65d980d13a23f91463a4ece1e8b76a166cb69" Nov 22 10:58:33 crc kubenswrapper[4926]: E1122 10:58:33.660106 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e0bf94b2b8eff6634f4c6fd311d65d980d13a23f91463a4ece1e8b76a166cb69\": container with ID starting with e0bf94b2b8eff6634f4c6fd311d65d980d13a23f91463a4ece1e8b76a166cb69 not found: ID does not exist" containerID="e0bf94b2b8eff6634f4c6fd311d65d980d13a23f91463a4ece1e8b76a166cb69" Nov 22 10:58:33 crc kubenswrapper[4926]: I1122 10:58:33.660153 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e0bf94b2b8eff6634f4c6fd311d65d980d13a23f91463a4ece1e8b76a166cb69"} err="failed to get container status \"e0bf94b2b8eff6634f4c6fd311d65d980d13a23f91463a4ece1e8b76a166cb69\": rpc error: code = NotFound desc = could not find container \"e0bf94b2b8eff6634f4c6fd311d65d980d13a23f91463a4ece1e8b76a166cb69\": container with ID starting with e0bf94b2b8eff6634f4c6fd311d65d980d13a23f91463a4ece1e8b76a166cb69 not found: ID does not exist" Nov 22 10:58:33 crc kubenswrapper[4926]: I1122 10:58:33.660178 4926 scope.go:117] "RemoveContainer" containerID="3499d28b9a03e6e9eb833a25c129e5b769dcef0dc4c668810df0df936ca4ef2a" Nov 22 10:58:33 crc kubenswrapper[4926]: E1122 10:58:33.661355 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3499d28b9a03e6e9eb833a25c129e5b769dcef0dc4c668810df0df936ca4ef2a\": container with ID starting with 3499d28b9a03e6e9eb833a25c129e5b769dcef0dc4c668810df0df936ca4ef2a not found: ID does not exist" containerID="3499d28b9a03e6e9eb833a25c129e5b769dcef0dc4c668810df0df936ca4ef2a" Nov 22 10:58:33 crc kubenswrapper[4926]: I1122 10:58:33.661380 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3499d28b9a03e6e9eb833a25c129e5b769dcef0dc4c668810df0df936ca4ef2a"} err="failed to get container status \"3499d28b9a03e6e9eb833a25c129e5b769dcef0dc4c668810df0df936ca4ef2a\": rpc error: code = NotFound desc = could not find container \"3499d28b9a03e6e9eb833a25c129e5b769dcef0dc4c668810df0df936ca4ef2a\": container with ID starting with 3499d28b9a03e6e9eb833a25c129e5b769dcef0dc4c668810df0df936ca4ef2a not found: ID does not exist" Nov 22 10:58:33 crc kubenswrapper[4926]: I1122 10:58:33.661404 4926 scope.go:117] "RemoveContainer" containerID="dfcb66f769575d6206a3db58410926f980a5356d820fc5dcbed5a0cee12978f8" Nov 22 10:58:33 crc kubenswrapper[4926]: E1122 10:58:33.661695 4926 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"dfcb66f769575d6206a3db58410926f980a5356d820fc5dcbed5a0cee12978f8\": container with ID starting with dfcb66f769575d6206a3db58410926f980a5356d820fc5dcbed5a0cee12978f8 not found: ID does not exist" containerID="dfcb66f769575d6206a3db58410926f980a5356d820fc5dcbed5a0cee12978f8" Nov 22 10:58:33 crc kubenswrapper[4926]: I1122 10:58:33.661746 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"dfcb66f769575d6206a3db58410926f980a5356d820fc5dcbed5a0cee12978f8"} err="failed to get container status \"dfcb66f769575d6206a3db58410926f980a5356d820fc5dcbed5a0cee12978f8\": rpc error: code = NotFound desc = could not find container \"dfcb66f769575d6206a3db58410926f980a5356d820fc5dcbed5a0cee12978f8\": container with ID starting with dfcb66f769575d6206a3db58410926f980a5356d820fc5dcbed5a0cee12978f8 not found: ID does not exist" Nov 22 10:58:33 crc kubenswrapper[4926]: I1122 10:58:33.661780 4926 scope.go:117] "RemoveContainer" containerID="b45cfb2cded6a8408a46525219be7da1bac3d0473e8ce848b0150bb98f209d98" Nov 22 10:58:33 crc kubenswrapper[4926]: I1122 10:58:33.662514 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"glance-default-internal-api-0\" (UID: \"eaf541eb-314b-4f78-bdcc-66f5b43b0ed5\") " pod="openstack/glance-default-internal-api-0" Nov 22 10:58:33 crc kubenswrapper[4926]: E1122 10:58:33.664432 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b45cfb2cded6a8408a46525219be7da1bac3d0473e8ce848b0150bb98f209d98\": container with ID starting with b45cfb2cded6a8408a46525219be7da1bac3d0473e8ce848b0150bb98f209d98 not found: ID does not exist" containerID="b45cfb2cded6a8408a46525219be7da1bac3d0473e8ce848b0150bb98f209d98" Nov 22 10:58:33 crc kubenswrapper[4926]: I1122 10:58:33.664475 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b45cfb2cded6a8408a46525219be7da1bac3d0473e8ce848b0150bb98f209d98"} err="failed to get container status \"b45cfb2cded6a8408a46525219be7da1bac3d0473e8ce848b0150bb98f209d98\": rpc error: code = NotFound desc = could not find container \"b45cfb2cded6a8408a46525219be7da1bac3d0473e8ce848b0150bb98f209d98\": container with ID starting with b45cfb2cded6a8408a46525219be7da1bac3d0473e8ce848b0150bb98f209d98 not found: ID does not exist" Nov 22 10:58:33 crc kubenswrapper[4926]: I1122 10:58:33.664500 4926 scope.go:117] "RemoveContainer" containerID="4ddaf34f404012355ae0eee3d7d70a108e9c3a361e079136abca04e3fe810195" Nov 22 10:58:33 crc kubenswrapper[4926]: I1122 10:58:33.665492 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 22 10:58:33 crc kubenswrapper[4926]: I1122 10:58:33.671919 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 22 10:58:33 crc kubenswrapper[4926]: I1122 10:58:33.675111 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 22 10:58:33 crc kubenswrapper[4926]: I1122 10:58:33.676152 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 22 10:58:33 crc kubenswrapper[4926]: I1122 10:58:33.676288 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 22 10:58:33 crc kubenswrapper[4926]: I1122 10:58:33.683951 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 22 10:58:33 crc kubenswrapper[4926]: I1122 10:58:33.802102 4926 scope.go:117] "RemoveContainer" containerID="4f890485d458c5bf8bbc0382df133030b3673ee7a83fab546f49103f6b4445fc" Nov 22 10:58:33 crc kubenswrapper[4926]: I1122 10:58:33.804020 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/23e62da3-97c2-497e-bf38-92860b31f010-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"23e62da3-97c2-497e-bf38-92860b31f010\") " pod="openstack/ceilometer-0" Nov 22 10:58:33 crc kubenswrapper[4926]: I1122 10:58:33.804079 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/23e62da3-97c2-497e-bf38-92860b31f010-run-httpd\") pod \"ceilometer-0\" (UID: \"23e62da3-97c2-497e-bf38-92860b31f010\") " pod="openstack/ceilometer-0" Nov 22 10:58:33 crc kubenswrapper[4926]: I1122 10:58:33.804139 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/23e62da3-97c2-497e-bf38-92860b31f010-scripts\") pod \"ceilometer-0\" (UID: \"23e62da3-97c2-497e-bf38-92860b31f010\") " pod="openstack/ceilometer-0" Nov 22 10:58:33 crc kubenswrapper[4926]: I1122 10:58:33.804336 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/23e62da3-97c2-497e-bf38-92860b31f010-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"23e62da3-97c2-497e-bf38-92860b31f010\") " pod="openstack/ceilometer-0" Nov 22 10:58:33 crc kubenswrapper[4926]: I1122 10:58:33.804428 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/23e62da3-97c2-497e-bf38-92860b31f010-log-httpd\") pod \"ceilometer-0\" (UID: \"23e62da3-97c2-497e-bf38-92860b31f010\") " pod="openstack/ceilometer-0" Nov 22 10:58:33 crc kubenswrapper[4926]: I1122 10:58:33.804878 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cpthv\" (UniqueName: \"kubernetes.io/projected/23e62da3-97c2-497e-bf38-92860b31f010-kube-api-access-cpthv\") pod \"ceilometer-0\" (UID: \"23e62da3-97c2-497e-bf38-92860b31f010\") " pod="openstack/ceilometer-0" Nov 22 10:58:33 crc kubenswrapper[4926]: I1122 10:58:33.804969 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/23e62da3-97c2-497e-bf38-92860b31f010-config-data\") pod \"ceilometer-0\" (UID: \"23e62da3-97c2-497e-bf38-92860b31f010\") " pod="openstack/ceilometer-0" Nov 22 10:58:33 crc kubenswrapper[4926]: I1122 10:58:33.837367 4926 scope.go:117] "RemoveContainer" containerID="4ddaf34f404012355ae0eee3d7d70a108e9c3a361e079136abca04e3fe810195" Nov 22 10:58:33 crc 
kubenswrapper[4926]: E1122 10:58:33.838344 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4ddaf34f404012355ae0eee3d7d70a108e9c3a361e079136abca04e3fe810195\": container with ID starting with 4ddaf34f404012355ae0eee3d7d70a108e9c3a361e079136abca04e3fe810195 not found: ID does not exist" containerID="4ddaf34f404012355ae0eee3d7d70a108e9c3a361e079136abca04e3fe810195" Nov 22 10:58:33 crc kubenswrapper[4926]: I1122 10:58:33.838382 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4ddaf34f404012355ae0eee3d7d70a108e9c3a361e079136abca04e3fe810195"} err="failed to get container status \"4ddaf34f404012355ae0eee3d7d70a108e9c3a361e079136abca04e3fe810195\": rpc error: code = NotFound desc = could not find container \"4ddaf34f404012355ae0eee3d7d70a108e9c3a361e079136abca04e3fe810195\": container with ID starting with 4ddaf34f404012355ae0eee3d7d70a108e9c3a361e079136abca04e3fe810195 not found: ID does not exist" Nov 22 10:58:33 crc kubenswrapper[4926]: I1122 10:58:33.838430 4926 scope.go:117] "RemoveContainer" containerID="4f890485d458c5bf8bbc0382df133030b3673ee7a83fab546f49103f6b4445fc" Nov 22 10:58:33 crc kubenswrapper[4926]: E1122 10:58:33.841134 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4f890485d458c5bf8bbc0382df133030b3673ee7a83fab546f49103f6b4445fc\": container with ID starting with 4f890485d458c5bf8bbc0382df133030b3673ee7a83fab546f49103f6b4445fc not found: ID does not exist" containerID="4f890485d458c5bf8bbc0382df133030b3673ee7a83fab546f49103f6b4445fc" Nov 22 10:58:33 crc kubenswrapper[4926]: I1122 10:58:33.841189 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4f890485d458c5bf8bbc0382df133030b3673ee7a83fab546f49103f6b4445fc"} err="failed to get container status \"4f890485d458c5bf8bbc0382df133030b3673ee7a83fab546f49103f6b4445fc\": rpc error: code = NotFound desc = could not find container \"4f890485d458c5bf8bbc0382df133030b3673ee7a83fab546f49103f6b4445fc\": container with ID starting with 4f890485d458c5bf8bbc0382df133030b3673ee7a83fab546f49103f6b4445fc not found: ID does not exist" Nov 22 10:58:33 crc kubenswrapper[4926]: I1122 10:58:33.908233 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/23e62da3-97c2-497e-bf38-92860b31f010-run-httpd\") pod \"ceilometer-0\" (UID: \"23e62da3-97c2-497e-bf38-92860b31f010\") " pod="openstack/ceilometer-0" Nov 22 10:58:33 crc kubenswrapper[4926]: I1122 10:58:33.908300 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/23e62da3-97c2-497e-bf38-92860b31f010-scripts\") pod \"ceilometer-0\" (UID: \"23e62da3-97c2-497e-bf38-92860b31f010\") " pod="openstack/ceilometer-0" Nov 22 10:58:33 crc kubenswrapper[4926]: I1122 10:58:33.908325 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/23e62da3-97c2-497e-bf38-92860b31f010-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"23e62da3-97c2-497e-bf38-92860b31f010\") " pod="openstack/ceilometer-0" Nov 22 10:58:33 crc kubenswrapper[4926]: I1122 10:58:33.908354 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: 
\"kubernetes.io/empty-dir/23e62da3-97c2-497e-bf38-92860b31f010-log-httpd\") pod \"ceilometer-0\" (UID: \"23e62da3-97c2-497e-bf38-92860b31f010\") " pod="openstack/ceilometer-0" Nov 22 10:58:33 crc kubenswrapper[4926]: I1122 10:58:33.908407 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cpthv\" (UniqueName: \"kubernetes.io/projected/23e62da3-97c2-497e-bf38-92860b31f010-kube-api-access-cpthv\") pod \"ceilometer-0\" (UID: \"23e62da3-97c2-497e-bf38-92860b31f010\") " pod="openstack/ceilometer-0" Nov 22 10:58:33 crc kubenswrapper[4926]: I1122 10:58:33.908434 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/23e62da3-97c2-497e-bf38-92860b31f010-config-data\") pod \"ceilometer-0\" (UID: \"23e62da3-97c2-497e-bf38-92860b31f010\") " pod="openstack/ceilometer-0" Nov 22 10:58:33 crc kubenswrapper[4926]: I1122 10:58:33.908493 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/23e62da3-97c2-497e-bf38-92860b31f010-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"23e62da3-97c2-497e-bf38-92860b31f010\") " pod="openstack/ceilometer-0" Nov 22 10:58:33 crc kubenswrapper[4926]: I1122 10:58:33.913202 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/23e62da3-97c2-497e-bf38-92860b31f010-run-httpd\") pod \"ceilometer-0\" (UID: \"23e62da3-97c2-497e-bf38-92860b31f010\") " pod="openstack/ceilometer-0" Nov 22 10:58:33 crc kubenswrapper[4926]: I1122 10:58:33.913231 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/23e62da3-97c2-497e-bf38-92860b31f010-log-httpd\") pod \"ceilometer-0\" (UID: \"23e62da3-97c2-497e-bf38-92860b31f010\") " pod="openstack/ceilometer-0" Nov 22 10:58:33 crc kubenswrapper[4926]: I1122 10:58:33.914205 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/23e62da3-97c2-497e-bf38-92860b31f010-scripts\") pod \"ceilometer-0\" (UID: \"23e62da3-97c2-497e-bf38-92860b31f010\") " pod="openstack/ceilometer-0" Nov 22 10:58:33 crc kubenswrapper[4926]: I1122 10:58:33.914220 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/23e62da3-97c2-497e-bf38-92860b31f010-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"23e62da3-97c2-497e-bf38-92860b31f010\") " pod="openstack/ceilometer-0" Nov 22 10:58:33 crc kubenswrapper[4926]: I1122 10:58:33.921228 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/23e62da3-97c2-497e-bf38-92860b31f010-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"23e62da3-97c2-497e-bf38-92860b31f010\") " pod="openstack/ceilometer-0" Nov 22 10:58:33 crc kubenswrapper[4926]: I1122 10:58:33.921881 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/23e62da3-97c2-497e-bf38-92860b31f010-config-data\") pod \"ceilometer-0\" (UID: \"23e62da3-97c2-497e-bf38-92860b31f010\") " pod="openstack/ceilometer-0" Nov 22 10:58:33 crc kubenswrapper[4926]: I1122 10:58:33.936820 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cpthv\" (UniqueName: 
\"kubernetes.io/projected/23e62da3-97c2-497e-bf38-92860b31f010-kube-api-access-cpthv\") pod \"ceilometer-0\" (UID: \"23e62da3-97c2-497e-bf38-92860b31f010\") " pod="openstack/ceilometer-0" Nov 22 10:58:34 crc kubenswrapper[4926]: I1122 10:58:34.012579 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 22 10:58:34 crc kubenswrapper[4926]: I1122 10:58:34.124493 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-8kkgp"] Nov 22 10:58:34 crc kubenswrapper[4926]: W1122 10:58:34.124681 4926 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod6d43169d_e199_4fe3_85d7_c39acd736eb6.slice/crio-96deed8e1761c294134d1354dcc3362dfe2e45a3afc2f3daf18312d8e8e62958 WatchSource:0}: Error finding container 96deed8e1761c294134d1354dcc3362dfe2e45a3afc2f3daf18312d8e8e62958: Status 404 returned error can't find the container with id 96deed8e1761c294134d1354dcc3362dfe2e45a3afc2f3daf18312d8e8e62958 Nov 22 10:58:34 crc kubenswrapper[4926]: I1122 10:58:34.245824 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"699ad142-80cd-4ee2-86ca-87c22cc7f39b","Type":"ContainerStarted","Data":"195797919dbe12fefaa5bed85085706e16d50310d62236168569e008d485d10c"} Nov 22 10:58:34 crc kubenswrapper[4926]: I1122 10:58:34.259076 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-8kkgp" event={"ID":"6d43169d-e199-4fe3-85d7-c39acd736eb6","Type":"ContainerStarted","Data":"96deed8e1761c294134d1354dcc3362dfe2e45a3afc2f3daf18312d8e8e62958"} Nov 22 10:58:34 crc kubenswrapper[4926]: I1122 10:58:34.287659 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-external-api-0" podStartSLOduration=3.287639905 podStartE2EDuration="3.287639905s" podCreationTimestamp="2025-11-22 10:58:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 10:58:34.278872255 +0000 UTC m=+1134.580477542" watchObservedRunningTime="2025-11-22 10:58:34.287639905 +0000 UTC m=+1134.589245182" Nov 22 10:58:34 crc kubenswrapper[4926]: I1122 10:58:34.427642 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 22 10:58:34 crc kubenswrapper[4926]: W1122 10:58:34.429678 4926 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podeaf541eb_314b_4f78_bdcc_66f5b43b0ed5.slice/crio-3c7c0cec2cc7f47587ad8a4acf629fc9e7e8794208694c73c92dfca89812d555 WatchSource:0}: Error finding container 3c7c0cec2cc7f47587ad8a4acf629fc9e7e8794208694c73c92dfca89812d555: Status 404 returned error can't find the container with id 3c7c0cec2cc7f47587ad8a4acf629fc9e7e8794208694c73c92dfca89812d555 Nov 22 10:58:34 crc kubenswrapper[4926]: I1122 10:58:34.524189 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 22 10:58:34 crc kubenswrapper[4926]: I1122 10:58:34.598016 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="32257092-e014-4913-99e3-a92b522301e2" path="/var/lib/kubelet/pods/32257092-e014-4913-99e3-a92b522301e2/volumes" Nov 22 10:58:34 crc kubenswrapper[4926]: I1122 10:58:34.599099 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cf1674fb-d82f-4bb2-9066-b51d000d311b" 
path="/var/lib/kubelet/pods/cf1674fb-d82f-4bb2-9066-b51d000d311b/volumes" Nov 22 10:58:35 crc kubenswrapper[4926]: I1122 10:58:35.280243 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"eaf541eb-314b-4f78-bdcc-66f5b43b0ed5","Type":"ContainerStarted","Data":"c28bbc6f7cc806ddd8358796970e015a1810ac7eaba5ac84b9281f72ce6a991a"} Nov 22 10:58:35 crc kubenswrapper[4926]: I1122 10:58:35.280792 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"eaf541eb-314b-4f78-bdcc-66f5b43b0ed5","Type":"ContainerStarted","Data":"3c7c0cec2cc7f47587ad8a4acf629fc9e7e8794208694c73c92dfca89812d555"} Nov 22 10:58:35 crc kubenswrapper[4926]: I1122 10:58:35.282615 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"23e62da3-97c2-497e-bf38-92860b31f010","Type":"ContainerStarted","Data":"a2d3c756d6c23f40f7822a7abd3cc13f6132bc90897bf4b1a1c7639b6319b0e2"} Nov 22 10:58:35 crc kubenswrapper[4926]: I1122 10:58:35.282635 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"23e62da3-97c2-497e-bf38-92860b31f010","Type":"ContainerStarted","Data":"bfef3af0735f719f03231792214d606ea8d2fd0d55f53b137d8840170c7e6994"} Nov 22 10:58:36 crc kubenswrapper[4926]: I1122 10:58:36.298276 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"eaf541eb-314b-4f78-bdcc-66f5b43b0ed5","Type":"ContainerStarted","Data":"634ebbb6a369400b7740f4b4bd8fa52408cbded4a5fef9ae0f634d2e05a3a478"} Nov 22 10:58:36 crc kubenswrapper[4926]: I1122 10:58:36.301085 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"23e62da3-97c2-497e-bf38-92860b31f010","Type":"ContainerStarted","Data":"34a62e77dc6c4ec9336708dbe67c2e433c1ca853c216da3bc05e896cc00eef02"} Nov 22 10:58:36 crc kubenswrapper[4926]: I1122 10:58:36.323475 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-internal-api-0" podStartSLOduration=3.323459992 podStartE2EDuration="3.323459992s" podCreationTimestamp="2025-11-22 10:58:33 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 10:58:36.323185564 +0000 UTC m=+1136.624790851" watchObservedRunningTime="2025-11-22 10:58:36.323459992 +0000 UTC m=+1136.625065279" Nov 22 10:58:37 crc kubenswrapper[4926]: I1122 10:58:37.316147 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"23e62da3-97c2-497e-bf38-92860b31f010","Type":"ContainerStarted","Data":"61f2ee88e33e4f58aba387d424226bfd6740331d4bd33698ee27c40f419820f4"} Nov 22 10:58:39 crc kubenswrapper[4926]: I1122 10:58:39.661693 4926 patch_prober.go:28] interesting pod/machine-config-daemon-xr9nd container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 22 10:58:39 crc kubenswrapper[4926]: I1122 10:58:39.662391 4926 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" podUID="d4977b14-85c3-4141-9b15-1768f09e8d27" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 22 10:58:41 
crc kubenswrapper[4926]: I1122 10:58:41.561642 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0" Nov 22 10:58:41 crc kubenswrapper[4926]: I1122 10:58:41.561872 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0" Nov 22 10:58:41 crc kubenswrapper[4926]: I1122 10:58:41.601542 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0" Nov 22 10:58:41 crc kubenswrapper[4926]: I1122 10:58:41.617373 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0" Nov 22 10:58:42 crc kubenswrapper[4926]: I1122 10:58:42.383843 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-8kkgp" event={"ID":"6d43169d-e199-4fe3-85d7-c39acd736eb6","Type":"ContainerStarted","Data":"6ea34daad8e79e0f8c03f6392333f7819c2a44de3e51b71bdfef454d111c56c0"} Nov 22 10:58:42 crc kubenswrapper[4926]: I1122 10:58:42.386829 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"23e62da3-97c2-497e-bf38-92860b31f010","Type":"ContainerStarted","Data":"47489090dc8745c8ff2145cf2f404ab861475737ed5ef8bc4f28c6fc792f2ef5"} Nov 22 10:58:42 crc kubenswrapper[4926]: I1122 10:58:42.387590 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0" Nov 22 10:58:42 crc kubenswrapper[4926]: I1122 10:58:42.387612 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0" Nov 22 10:58:42 crc kubenswrapper[4926]: I1122 10:58:42.400967 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-conductor-db-sync-8kkgp" podStartSLOduration=1.95502189 podStartE2EDuration="9.400947644s" podCreationTimestamp="2025-11-22 10:58:33 +0000 UTC" firstStartedPulling="2025-11-22 10:58:34.129030082 +0000 UTC m=+1134.430635359" lastFinishedPulling="2025-11-22 10:58:41.574955826 +0000 UTC m=+1141.876561113" observedRunningTime="2025-11-22 10:58:42.398948467 +0000 UTC m=+1142.700553754" watchObservedRunningTime="2025-11-22 10:58:42.400947644 +0000 UTC m=+1142.702552931" Nov 22 10:58:42 crc kubenswrapper[4926]: I1122 10:58:42.424062 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.414181323 podStartE2EDuration="9.424044814s" podCreationTimestamp="2025-11-22 10:58:33 +0000 UTC" firstStartedPulling="2025-11-22 10:58:34.541881182 +0000 UTC m=+1134.843486469" lastFinishedPulling="2025-11-22 10:58:41.551744673 +0000 UTC m=+1141.853349960" observedRunningTime="2025-11-22 10:58:42.418360022 +0000 UTC m=+1142.719965309" watchObservedRunningTime="2025-11-22 10:58:42.424044814 +0000 UTC m=+1142.725650101" Nov 22 10:58:43 crc kubenswrapper[4926]: I1122 10:58:43.397584 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Nov 22 10:58:43 crc kubenswrapper[4926]: I1122 10:58:43.675915 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0" Nov 22 10:58:43 crc kubenswrapper[4926]: I1122 10:58:43.675966 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0" Nov 22 10:58:43 crc kubenswrapper[4926]: I1122 10:58:43.720491 4926 kubelet.go:2542] 
"SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0" Nov 22 10:58:43 crc kubenswrapper[4926]: I1122 10:58:43.722817 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0" Nov 22 10:58:44 crc kubenswrapper[4926]: I1122 10:58:44.299910 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 22 10:58:44 crc kubenswrapper[4926]: I1122 10:58:44.401938 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0" Nov 22 10:58:44 crc kubenswrapper[4926]: I1122 10:58:44.407142 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0" Nov 22 10:58:44 crc kubenswrapper[4926]: I1122 10:58:44.408127 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Nov 22 10:58:44 crc kubenswrapper[4926]: I1122 10:58:44.408264 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Nov 22 10:58:45 crc kubenswrapper[4926]: I1122 10:58:45.415354 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="23e62da3-97c2-497e-bf38-92860b31f010" containerName="ceilometer-central-agent" containerID="cri-o://a2d3c756d6c23f40f7822a7abd3cc13f6132bc90897bf4b1a1c7639b6319b0e2" gracePeriod=30 Nov 22 10:58:45 crc kubenswrapper[4926]: I1122 10:58:45.415767 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="23e62da3-97c2-497e-bf38-92860b31f010" containerName="proxy-httpd" containerID="cri-o://47489090dc8745c8ff2145cf2f404ab861475737ed5ef8bc4f28c6fc792f2ef5" gracePeriod=30 Nov 22 10:58:45 crc kubenswrapper[4926]: I1122 10:58:45.415814 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="23e62da3-97c2-497e-bf38-92860b31f010" containerName="sg-core" containerID="cri-o://61f2ee88e33e4f58aba387d424226bfd6740331d4bd33698ee27c40f419820f4" gracePeriod=30 Nov 22 10:58:45 crc kubenswrapper[4926]: I1122 10:58:45.415844 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="23e62da3-97c2-497e-bf38-92860b31f010" containerName="ceilometer-notification-agent" containerID="cri-o://34a62e77dc6c4ec9336708dbe67c2e433c1ca853c216da3bc05e896cc00eef02" gracePeriod=30 Nov 22 10:58:46 crc kubenswrapper[4926]: I1122 10:58:46.339772 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0" Nov 22 10:58:46 crc kubenswrapper[4926]: I1122 10:58:46.340497 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0" Nov 22 10:58:46 crc kubenswrapper[4926]: I1122 10:58:46.449097 4926 generic.go:334] "Generic (PLEG): container finished" podID="23e62da3-97c2-497e-bf38-92860b31f010" containerID="47489090dc8745c8ff2145cf2f404ab861475737ed5ef8bc4f28c6fc792f2ef5" exitCode=0 Nov 22 10:58:46 crc kubenswrapper[4926]: I1122 10:58:46.449130 4926 generic.go:334] "Generic (PLEG): container finished" podID="23e62da3-97c2-497e-bf38-92860b31f010" containerID="61f2ee88e33e4f58aba387d424226bfd6740331d4bd33698ee27c40f419820f4" exitCode=2 Nov 22 10:58:46 crc kubenswrapper[4926]: I1122 10:58:46.449139 4926 generic.go:334] "Generic (PLEG): container finished" 
podID="23e62da3-97c2-497e-bf38-92860b31f010" containerID="a2d3c756d6c23f40f7822a7abd3cc13f6132bc90897bf4b1a1c7639b6319b0e2" exitCode=0 Nov 22 10:58:46 crc kubenswrapper[4926]: I1122 10:58:46.449353 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"23e62da3-97c2-497e-bf38-92860b31f010","Type":"ContainerDied","Data":"47489090dc8745c8ff2145cf2f404ab861475737ed5ef8bc4f28c6fc792f2ef5"} Nov 22 10:58:46 crc kubenswrapper[4926]: I1122 10:58:46.449439 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"23e62da3-97c2-497e-bf38-92860b31f010","Type":"ContainerDied","Data":"61f2ee88e33e4f58aba387d424226bfd6740331d4bd33698ee27c40f419820f4"} Nov 22 10:58:46 crc kubenswrapper[4926]: I1122 10:58:46.449455 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"23e62da3-97c2-497e-bf38-92860b31f010","Type":"ContainerDied","Data":"a2d3c756d6c23f40f7822a7abd3cc13f6132bc90897bf4b1a1c7639b6319b0e2"} Nov 22 10:58:48 crc kubenswrapper[4926]: I1122 10:58:48.465739 4926 generic.go:334] "Generic (PLEG): container finished" podID="23e62da3-97c2-497e-bf38-92860b31f010" containerID="34a62e77dc6c4ec9336708dbe67c2e433c1ca853c216da3bc05e896cc00eef02" exitCode=0 Nov 22 10:58:48 crc kubenswrapper[4926]: I1122 10:58:48.465812 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"23e62da3-97c2-497e-bf38-92860b31f010","Type":"ContainerDied","Data":"34a62e77dc6c4ec9336708dbe67c2e433c1ca853c216da3bc05e896cc00eef02"} Nov 22 10:58:48 crc kubenswrapper[4926]: I1122 10:58:48.597015 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 22 10:58:48 crc kubenswrapper[4926]: I1122 10:58:48.702989 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/23e62da3-97c2-497e-bf38-92860b31f010-config-data\") pod \"23e62da3-97c2-497e-bf38-92860b31f010\" (UID: \"23e62da3-97c2-497e-bf38-92860b31f010\") " Nov 22 10:58:48 crc kubenswrapper[4926]: I1122 10:58:48.703085 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/23e62da3-97c2-497e-bf38-92860b31f010-log-httpd\") pod \"23e62da3-97c2-497e-bf38-92860b31f010\" (UID: \"23e62da3-97c2-497e-bf38-92860b31f010\") " Nov 22 10:58:48 crc kubenswrapper[4926]: I1122 10:58:48.703122 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cpthv\" (UniqueName: \"kubernetes.io/projected/23e62da3-97c2-497e-bf38-92860b31f010-kube-api-access-cpthv\") pod \"23e62da3-97c2-497e-bf38-92860b31f010\" (UID: \"23e62da3-97c2-497e-bf38-92860b31f010\") " Nov 22 10:58:48 crc kubenswrapper[4926]: I1122 10:58:48.703175 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/23e62da3-97c2-497e-bf38-92860b31f010-run-httpd\") pod \"23e62da3-97c2-497e-bf38-92860b31f010\" (UID: \"23e62da3-97c2-497e-bf38-92860b31f010\") " Nov 22 10:58:48 crc kubenswrapper[4926]: I1122 10:58:48.703325 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/23e62da3-97c2-497e-bf38-92860b31f010-scripts\") pod \"23e62da3-97c2-497e-bf38-92860b31f010\" (UID: \"23e62da3-97c2-497e-bf38-92860b31f010\") " Nov 22 10:58:48 crc kubenswrapper[4926]: I1122 10:58:48.703364 4926 
reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/23e62da3-97c2-497e-bf38-92860b31f010-combined-ca-bundle\") pod \"23e62da3-97c2-497e-bf38-92860b31f010\" (UID: \"23e62da3-97c2-497e-bf38-92860b31f010\") " Nov 22 10:58:48 crc kubenswrapper[4926]: I1122 10:58:48.703426 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/23e62da3-97c2-497e-bf38-92860b31f010-sg-core-conf-yaml\") pod \"23e62da3-97c2-497e-bf38-92860b31f010\" (UID: \"23e62da3-97c2-497e-bf38-92860b31f010\") " Nov 22 10:58:48 crc kubenswrapper[4926]: I1122 10:58:48.703564 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/23e62da3-97c2-497e-bf38-92860b31f010-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "23e62da3-97c2-497e-bf38-92860b31f010" (UID: "23e62da3-97c2-497e-bf38-92860b31f010"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 10:58:48 crc kubenswrapper[4926]: I1122 10:58:48.703945 4926 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/23e62da3-97c2-497e-bf38-92860b31f010-run-httpd\") on node \"crc\" DevicePath \"\"" Nov 22 10:58:48 crc kubenswrapper[4926]: I1122 10:58:48.704023 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/23e62da3-97c2-497e-bf38-92860b31f010-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "23e62da3-97c2-497e-bf38-92860b31f010" (UID: "23e62da3-97c2-497e-bf38-92860b31f010"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 10:58:48 crc kubenswrapper[4926]: I1122 10:58:48.709401 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/23e62da3-97c2-497e-bf38-92860b31f010-kube-api-access-cpthv" (OuterVolumeSpecName: "kube-api-access-cpthv") pod "23e62da3-97c2-497e-bf38-92860b31f010" (UID: "23e62da3-97c2-497e-bf38-92860b31f010"). InnerVolumeSpecName "kube-api-access-cpthv". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:58:48 crc kubenswrapper[4926]: I1122 10:58:48.723124 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/23e62da3-97c2-497e-bf38-92860b31f010-scripts" (OuterVolumeSpecName: "scripts") pod "23e62da3-97c2-497e-bf38-92860b31f010" (UID: "23e62da3-97c2-497e-bf38-92860b31f010"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:58:48 crc kubenswrapper[4926]: I1122 10:58:48.732662 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/23e62da3-97c2-497e-bf38-92860b31f010-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "23e62da3-97c2-497e-bf38-92860b31f010" (UID: "23e62da3-97c2-497e-bf38-92860b31f010"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:58:48 crc kubenswrapper[4926]: I1122 10:58:48.799797 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/23e62da3-97c2-497e-bf38-92860b31f010-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "23e62da3-97c2-497e-bf38-92860b31f010" (UID: "23e62da3-97c2-497e-bf38-92860b31f010"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:58:48 crc kubenswrapper[4926]: I1122 10:58:48.805067 4926 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/23e62da3-97c2-497e-bf38-92860b31f010-scripts\") on node \"crc\" DevicePath \"\"" Nov 22 10:58:48 crc kubenswrapper[4926]: I1122 10:58:48.805095 4926 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/23e62da3-97c2-497e-bf38-92860b31f010-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 22 10:58:48 crc kubenswrapper[4926]: I1122 10:58:48.805106 4926 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/23e62da3-97c2-497e-bf38-92860b31f010-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Nov 22 10:58:48 crc kubenswrapper[4926]: I1122 10:58:48.805113 4926 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/23e62da3-97c2-497e-bf38-92860b31f010-log-httpd\") on node \"crc\" DevicePath \"\"" Nov 22 10:58:48 crc kubenswrapper[4926]: I1122 10:58:48.805123 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cpthv\" (UniqueName: \"kubernetes.io/projected/23e62da3-97c2-497e-bf38-92860b31f010-kube-api-access-cpthv\") on node \"crc\" DevicePath \"\"" Nov 22 10:58:48 crc kubenswrapper[4926]: I1122 10:58:48.805135 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/23e62da3-97c2-497e-bf38-92860b31f010-config-data" (OuterVolumeSpecName: "config-data") pod "23e62da3-97c2-497e-bf38-92860b31f010" (UID: "23e62da3-97c2-497e-bf38-92860b31f010"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:58:48 crc kubenswrapper[4926]: I1122 10:58:48.906368 4926 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/23e62da3-97c2-497e-bf38-92860b31f010-config-data\") on node \"crc\" DevicePath \"\"" Nov 22 10:58:49 crc kubenswrapper[4926]: I1122 10:58:49.478143 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"23e62da3-97c2-497e-bf38-92860b31f010","Type":"ContainerDied","Data":"bfef3af0735f719f03231792214d606ea8d2fd0d55f53b137d8840170c7e6994"} Nov 22 10:58:49 crc kubenswrapper[4926]: I1122 10:58:49.478205 4926 scope.go:117] "RemoveContainer" containerID="47489090dc8745c8ff2145cf2f404ab861475737ed5ef8bc4f28c6fc792f2ef5" Nov 22 10:58:49 crc kubenswrapper[4926]: I1122 10:58:49.478220 4926 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 22 10:58:49 crc kubenswrapper[4926]: I1122 10:58:49.513484 4926 scope.go:117] "RemoveContainer" containerID="61f2ee88e33e4f58aba387d424226bfd6740331d4bd33698ee27c40f419820f4" Nov 22 10:58:49 crc kubenswrapper[4926]: I1122 10:58:49.515544 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 22 10:58:49 crc kubenswrapper[4926]: I1122 10:58:49.523294 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Nov 22 10:58:49 crc kubenswrapper[4926]: I1122 10:58:49.529915 4926 scope.go:117] "RemoveContainer" containerID="34a62e77dc6c4ec9336708dbe67c2e433c1ca853c216da3bc05e896cc00eef02" Nov 22 10:58:49 crc kubenswrapper[4926]: I1122 10:58:49.540466 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 22 10:58:49 crc kubenswrapper[4926]: E1122 10:58:49.540990 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="23e62da3-97c2-497e-bf38-92860b31f010" containerName="sg-core" Nov 22 10:58:49 crc kubenswrapper[4926]: I1122 10:58:49.541021 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="23e62da3-97c2-497e-bf38-92860b31f010" containerName="sg-core" Nov 22 10:58:49 crc kubenswrapper[4926]: E1122 10:58:49.541040 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="23e62da3-97c2-497e-bf38-92860b31f010" containerName="ceilometer-central-agent" Nov 22 10:58:49 crc kubenswrapper[4926]: I1122 10:58:49.541047 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="23e62da3-97c2-497e-bf38-92860b31f010" containerName="ceilometer-central-agent" Nov 22 10:58:49 crc kubenswrapper[4926]: E1122 10:58:49.541069 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="23e62da3-97c2-497e-bf38-92860b31f010" containerName="proxy-httpd" Nov 22 10:58:49 crc kubenswrapper[4926]: I1122 10:58:49.541075 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="23e62da3-97c2-497e-bf38-92860b31f010" containerName="proxy-httpd" Nov 22 10:58:49 crc kubenswrapper[4926]: E1122 10:58:49.541087 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="23e62da3-97c2-497e-bf38-92860b31f010" containerName="ceilometer-notification-agent" Nov 22 10:58:49 crc kubenswrapper[4926]: I1122 10:58:49.541093 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="23e62da3-97c2-497e-bf38-92860b31f010" containerName="ceilometer-notification-agent" Nov 22 10:58:49 crc kubenswrapper[4926]: I1122 10:58:49.541262 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="23e62da3-97c2-497e-bf38-92860b31f010" containerName="ceilometer-notification-agent" Nov 22 10:58:49 crc kubenswrapper[4926]: I1122 10:58:49.541275 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="23e62da3-97c2-497e-bf38-92860b31f010" containerName="sg-core" Nov 22 10:58:49 crc kubenswrapper[4926]: I1122 10:58:49.541291 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="23e62da3-97c2-497e-bf38-92860b31f010" containerName="ceilometer-central-agent" Nov 22 10:58:49 crc kubenswrapper[4926]: I1122 10:58:49.541308 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="23e62da3-97c2-497e-bf38-92860b31f010" containerName="proxy-httpd" Nov 22 10:58:49 crc kubenswrapper[4926]: I1122 10:58:49.542828 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 22 10:58:49 crc kubenswrapper[4926]: I1122 10:58:49.550242 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 22 10:58:49 crc kubenswrapper[4926]: I1122 10:58:49.550261 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 22 10:58:49 crc kubenswrapper[4926]: I1122 10:58:49.555191 4926 scope.go:117] "RemoveContainer" containerID="a2d3c756d6c23f40f7822a7abd3cc13f6132bc90897bf4b1a1c7639b6319b0e2" Nov 22 10:58:49 crc kubenswrapper[4926]: I1122 10:58:49.559123 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 22 10:58:49 crc kubenswrapper[4926]: I1122 10:58:49.617784 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/cf163d93-140e-4b0d-bcc4-9fffee4712b3-log-httpd\") pod \"ceilometer-0\" (UID: \"cf163d93-140e-4b0d-bcc4-9fffee4712b3\") " pod="openstack/ceilometer-0" Nov 22 10:58:49 crc kubenswrapper[4926]: I1122 10:58:49.618132 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/cf163d93-140e-4b0d-bcc4-9fffee4712b3-run-httpd\") pod \"ceilometer-0\" (UID: \"cf163d93-140e-4b0d-bcc4-9fffee4712b3\") " pod="openstack/ceilometer-0" Nov 22 10:58:49 crc kubenswrapper[4926]: I1122 10:58:49.618279 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cf163d93-140e-4b0d-bcc4-9fffee4712b3-config-data\") pod \"ceilometer-0\" (UID: \"cf163d93-140e-4b0d-bcc4-9fffee4712b3\") " pod="openstack/ceilometer-0" Nov 22 10:58:49 crc kubenswrapper[4926]: I1122 10:58:49.618305 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-45dld\" (UniqueName: \"kubernetes.io/projected/cf163d93-140e-4b0d-bcc4-9fffee4712b3-kube-api-access-45dld\") pod \"ceilometer-0\" (UID: \"cf163d93-140e-4b0d-bcc4-9fffee4712b3\") " pod="openstack/ceilometer-0" Nov 22 10:58:49 crc kubenswrapper[4926]: I1122 10:58:49.618376 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cf163d93-140e-4b0d-bcc4-9fffee4712b3-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"cf163d93-140e-4b0d-bcc4-9fffee4712b3\") " pod="openstack/ceilometer-0" Nov 22 10:58:49 crc kubenswrapper[4926]: I1122 10:58:49.618487 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/cf163d93-140e-4b0d-bcc4-9fffee4712b3-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"cf163d93-140e-4b0d-bcc4-9fffee4712b3\") " pod="openstack/ceilometer-0" Nov 22 10:58:49 crc kubenswrapper[4926]: I1122 10:58:49.618548 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/cf163d93-140e-4b0d-bcc4-9fffee4712b3-scripts\") pod \"ceilometer-0\" (UID: \"cf163d93-140e-4b0d-bcc4-9fffee4712b3\") " pod="openstack/ceilometer-0" Nov 22 10:58:49 crc kubenswrapper[4926]: I1122 10:58:49.719995 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/cf163d93-140e-4b0d-bcc4-9fffee4712b3-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"cf163d93-140e-4b0d-bcc4-9fffee4712b3\") " pod="openstack/ceilometer-0" Nov 22 10:58:49 crc kubenswrapper[4926]: I1122 10:58:49.720094 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/cf163d93-140e-4b0d-bcc4-9fffee4712b3-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"cf163d93-140e-4b0d-bcc4-9fffee4712b3\") " pod="openstack/ceilometer-0" Nov 22 10:58:49 crc kubenswrapper[4926]: I1122 10:58:49.720124 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/cf163d93-140e-4b0d-bcc4-9fffee4712b3-scripts\") pod \"ceilometer-0\" (UID: \"cf163d93-140e-4b0d-bcc4-9fffee4712b3\") " pod="openstack/ceilometer-0" Nov 22 10:58:49 crc kubenswrapper[4926]: I1122 10:58:49.720153 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/cf163d93-140e-4b0d-bcc4-9fffee4712b3-log-httpd\") pod \"ceilometer-0\" (UID: \"cf163d93-140e-4b0d-bcc4-9fffee4712b3\") " pod="openstack/ceilometer-0" Nov 22 10:58:49 crc kubenswrapper[4926]: I1122 10:58:49.720756 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/cf163d93-140e-4b0d-bcc4-9fffee4712b3-log-httpd\") pod \"ceilometer-0\" (UID: \"cf163d93-140e-4b0d-bcc4-9fffee4712b3\") " pod="openstack/ceilometer-0" Nov 22 10:58:49 crc kubenswrapper[4926]: I1122 10:58:49.720975 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/cf163d93-140e-4b0d-bcc4-9fffee4712b3-run-httpd\") pod \"ceilometer-0\" (UID: \"cf163d93-140e-4b0d-bcc4-9fffee4712b3\") " pod="openstack/ceilometer-0" Nov 22 10:58:49 crc kubenswrapper[4926]: I1122 10:58:49.721101 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cf163d93-140e-4b0d-bcc4-9fffee4712b3-config-data\") pod \"ceilometer-0\" (UID: \"cf163d93-140e-4b0d-bcc4-9fffee4712b3\") " pod="openstack/ceilometer-0" Nov 22 10:58:49 crc kubenswrapper[4926]: I1122 10:58:49.721125 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-45dld\" (UniqueName: \"kubernetes.io/projected/cf163d93-140e-4b0d-bcc4-9fffee4712b3-kube-api-access-45dld\") pod \"ceilometer-0\" (UID: \"cf163d93-140e-4b0d-bcc4-9fffee4712b3\") " pod="openstack/ceilometer-0" Nov 22 10:58:49 crc kubenswrapper[4926]: I1122 10:58:49.721312 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/cf163d93-140e-4b0d-bcc4-9fffee4712b3-run-httpd\") pod \"ceilometer-0\" (UID: \"cf163d93-140e-4b0d-bcc4-9fffee4712b3\") " pod="openstack/ceilometer-0" Nov 22 10:58:49 crc kubenswrapper[4926]: I1122 10:58:49.726219 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cf163d93-140e-4b0d-bcc4-9fffee4712b3-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"cf163d93-140e-4b0d-bcc4-9fffee4712b3\") " pod="openstack/ceilometer-0" Nov 22 10:58:49 crc kubenswrapper[4926]: I1122 10:58:49.730193 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: 
\"kubernetes.io/secret/cf163d93-140e-4b0d-bcc4-9fffee4712b3-config-data\") pod \"ceilometer-0\" (UID: \"cf163d93-140e-4b0d-bcc4-9fffee4712b3\") " pod="openstack/ceilometer-0" Nov 22 10:58:49 crc kubenswrapper[4926]: I1122 10:58:49.730602 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/cf163d93-140e-4b0d-bcc4-9fffee4712b3-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"cf163d93-140e-4b0d-bcc4-9fffee4712b3\") " pod="openstack/ceilometer-0" Nov 22 10:58:49 crc kubenswrapper[4926]: I1122 10:58:49.734834 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/cf163d93-140e-4b0d-bcc4-9fffee4712b3-scripts\") pod \"ceilometer-0\" (UID: \"cf163d93-140e-4b0d-bcc4-9fffee4712b3\") " pod="openstack/ceilometer-0" Nov 22 10:58:49 crc kubenswrapper[4926]: I1122 10:58:49.738294 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-45dld\" (UniqueName: \"kubernetes.io/projected/cf163d93-140e-4b0d-bcc4-9fffee4712b3-kube-api-access-45dld\") pod \"ceilometer-0\" (UID: \"cf163d93-140e-4b0d-bcc4-9fffee4712b3\") " pod="openstack/ceilometer-0" Nov 22 10:58:49 crc kubenswrapper[4926]: I1122 10:58:49.860014 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 22 10:58:50 crc kubenswrapper[4926]: W1122 10:58:50.314406 4926 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podcf163d93_140e_4b0d_bcc4_9fffee4712b3.slice/crio-4c7382f67b374fff8bfed644be89d9a850137bb6f8104e8d044fe866525f2523 WatchSource:0}: Error finding container 4c7382f67b374fff8bfed644be89d9a850137bb6f8104e8d044fe866525f2523: Status 404 returned error can't find the container with id 4c7382f67b374fff8bfed644be89d9a850137bb6f8104e8d044fe866525f2523 Nov 22 10:58:50 crc kubenswrapper[4926]: I1122 10:58:50.317363 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 22 10:58:50 crc kubenswrapper[4926]: I1122 10:58:50.491227 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"cf163d93-140e-4b0d-bcc4-9fffee4712b3","Type":"ContainerStarted","Data":"4c7382f67b374fff8bfed644be89d9a850137bb6f8104e8d044fe866525f2523"} Nov 22 10:58:50 crc kubenswrapper[4926]: I1122 10:58:50.594908 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="23e62da3-97c2-497e-bf38-92860b31f010" path="/var/lib/kubelet/pods/23e62da3-97c2-497e-bf38-92860b31f010/volumes" Nov 22 10:58:51 crc kubenswrapper[4926]: I1122 10:58:51.504997 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"cf163d93-140e-4b0d-bcc4-9fffee4712b3","Type":"ContainerStarted","Data":"ade90dbaf56b612ec437f180ec7641294939e95d53418b7ff3cf57c2d44af36b"} Nov 22 10:58:53 crc kubenswrapper[4926]: I1122 10:58:53.526409 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"cf163d93-140e-4b0d-bcc4-9fffee4712b3","Type":"ContainerStarted","Data":"61802681c2f9ee25f8c4034e975d4cd836b40dec1c3983d1c537547a11e9d502"} Nov 22 10:58:54 crc kubenswrapper[4926]: I1122 10:58:54.537661 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"cf163d93-140e-4b0d-bcc4-9fffee4712b3","Type":"ContainerStarted","Data":"04355b4470a06df0bcb09ee91fd635d51b5c37bfd3192b5c8eb2d7cc87fe1fc2"} Nov 22 10:58:55 crc 
kubenswrapper[4926]: I1122 10:58:55.551717 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"cf163d93-140e-4b0d-bcc4-9fffee4712b3","Type":"ContainerStarted","Data":"714c95772f9ce0959495da8e9f02baf2d62cbb84fc3b9650167aa4c2bbdaafd9"} Nov 22 10:58:55 crc kubenswrapper[4926]: I1122 10:58:55.552429 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Nov 22 10:58:55 crc kubenswrapper[4926]: I1122 10:58:55.590625 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=1.996404493 podStartE2EDuration="6.59060209s" podCreationTimestamp="2025-11-22 10:58:49 +0000 UTC" firstStartedPulling="2025-11-22 10:58:50.316507421 +0000 UTC m=+1150.618112708" lastFinishedPulling="2025-11-22 10:58:54.910705028 +0000 UTC m=+1155.212310305" observedRunningTime="2025-11-22 10:58:55.581308085 +0000 UTC m=+1155.882913372" watchObservedRunningTime="2025-11-22 10:58:55.59060209 +0000 UTC m=+1155.892207377" Nov 22 10:58:57 crc kubenswrapper[4926]: I1122 10:58:57.574361 4926 generic.go:334] "Generic (PLEG): container finished" podID="6d43169d-e199-4fe3-85d7-c39acd736eb6" containerID="6ea34daad8e79e0f8c03f6392333f7819c2a44de3e51b71bdfef454d111c56c0" exitCode=0 Nov 22 10:58:57 crc kubenswrapper[4926]: I1122 10:58:57.574491 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-8kkgp" event={"ID":"6d43169d-e199-4fe3-85d7-c39acd736eb6","Type":"ContainerDied","Data":"6ea34daad8e79e0f8c03f6392333f7819c2a44de3e51b71bdfef454d111c56c0"} Nov 22 10:58:58 crc kubenswrapper[4926]: I1122 10:58:58.928655 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-8kkgp" Nov 22 10:58:59 crc kubenswrapper[4926]: I1122 10:58:59.011300 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6d43169d-e199-4fe3-85d7-c39acd736eb6-combined-ca-bundle\") pod \"6d43169d-e199-4fe3-85d7-c39acd736eb6\" (UID: \"6d43169d-e199-4fe3-85d7-c39acd736eb6\") " Nov 22 10:58:59 crc kubenswrapper[4926]: I1122 10:58:59.011368 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6d43169d-e199-4fe3-85d7-c39acd736eb6-scripts\") pod \"6d43169d-e199-4fe3-85d7-c39acd736eb6\" (UID: \"6d43169d-e199-4fe3-85d7-c39acd736eb6\") " Nov 22 10:58:59 crc kubenswrapper[4926]: I1122 10:58:59.011399 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6d43169d-e199-4fe3-85d7-c39acd736eb6-config-data\") pod \"6d43169d-e199-4fe3-85d7-c39acd736eb6\" (UID: \"6d43169d-e199-4fe3-85d7-c39acd736eb6\") " Nov 22 10:58:59 crc kubenswrapper[4926]: I1122 10:58:59.011764 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fxqfb\" (UniqueName: \"kubernetes.io/projected/6d43169d-e199-4fe3-85d7-c39acd736eb6-kube-api-access-fxqfb\") pod \"6d43169d-e199-4fe3-85d7-c39acd736eb6\" (UID: \"6d43169d-e199-4fe3-85d7-c39acd736eb6\") " Nov 22 10:58:59 crc kubenswrapper[4926]: I1122 10:58:59.016908 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6d43169d-e199-4fe3-85d7-c39acd736eb6-scripts" (OuterVolumeSpecName: "scripts") pod "6d43169d-e199-4fe3-85d7-c39acd736eb6" (UID: 
"6d43169d-e199-4fe3-85d7-c39acd736eb6"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:58:59 crc kubenswrapper[4926]: I1122 10:58:59.016975 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6d43169d-e199-4fe3-85d7-c39acd736eb6-kube-api-access-fxqfb" (OuterVolumeSpecName: "kube-api-access-fxqfb") pod "6d43169d-e199-4fe3-85d7-c39acd736eb6" (UID: "6d43169d-e199-4fe3-85d7-c39acd736eb6"). InnerVolumeSpecName "kube-api-access-fxqfb". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:58:59 crc kubenswrapper[4926]: I1122 10:58:59.038955 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6d43169d-e199-4fe3-85d7-c39acd736eb6-config-data" (OuterVolumeSpecName: "config-data") pod "6d43169d-e199-4fe3-85d7-c39acd736eb6" (UID: "6d43169d-e199-4fe3-85d7-c39acd736eb6"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:58:59 crc kubenswrapper[4926]: I1122 10:58:59.056996 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6d43169d-e199-4fe3-85d7-c39acd736eb6-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "6d43169d-e199-4fe3-85d7-c39acd736eb6" (UID: "6d43169d-e199-4fe3-85d7-c39acd736eb6"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:58:59 crc kubenswrapper[4926]: I1122 10:58:59.113807 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fxqfb\" (UniqueName: \"kubernetes.io/projected/6d43169d-e199-4fe3-85d7-c39acd736eb6-kube-api-access-fxqfb\") on node \"crc\" DevicePath \"\"" Nov 22 10:58:59 crc kubenswrapper[4926]: I1122 10:58:59.113845 4926 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6d43169d-e199-4fe3-85d7-c39acd736eb6-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 22 10:58:59 crc kubenswrapper[4926]: I1122 10:58:59.113856 4926 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6d43169d-e199-4fe3-85d7-c39acd736eb6-scripts\") on node \"crc\" DevicePath \"\"" Nov 22 10:58:59 crc kubenswrapper[4926]: I1122 10:58:59.113866 4926 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6d43169d-e199-4fe3-85d7-c39acd736eb6-config-data\") on node \"crc\" DevicePath \"\"" Nov 22 10:58:59 crc kubenswrapper[4926]: I1122 10:58:59.617098 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-8kkgp" event={"ID":"6d43169d-e199-4fe3-85d7-c39acd736eb6","Type":"ContainerDied","Data":"96deed8e1761c294134d1354dcc3362dfe2e45a3afc2f3daf18312d8e8e62958"} Nov 22 10:58:59 crc kubenswrapper[4926]: I1122 10:58:59.617831 4926 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="96deed8e1761c294134d1354dcc3362dfe2e45a3afc2f3daf18312d8e8e62958" Nov 22 10:58:59 crc kubenswrapper[4926]: I1122 10:58:59.617433 4926 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-8kkgp"
Nov 22 10:58:59 crc kubenswrapper[4926]: I1122 10:58:59.696424 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-conductor-0"]
Nov 22 10:58:59 crc kubenswrapper[4926]: E1122 10:58:59.696765 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6d43169d-e199-4fe3-85d7-c39acd736eb6" containerName="nova-cell0-conductor-db-sync"
Nov 22 10:58:59 crc kubenswrapper[4926]: I1122 10:58:59.696780 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="6d43169d-e199-4fe3-85d7-c39acd736eb6" containerName="nova-cell0-conductor-db-sync"
Nov 22 10:58:59 crc kubenswrapper[4926]: I1122 10:58:59.696958 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="6d43169d-e199-4fe3-85d7-c39acd736eb6" containerName="nova-cell0-conductor-db-sync"
Nov 22 10:58:59 crc kubenswrapper[4926]: I1122 10:58:59.697516 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-0"
Nov 22 10:58:59 crc kubenswrapper[4926]: I1122 10:58:59.707344 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-config-data"
Nov 22 10:58:59 crc kubenswrapper[4926]: I1122 10:58:59.707538 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-nova-dockercfg-4ccpb"
Nov 22 10:58:59 crc kubenswrapper[4926]: I1122 10:58:59.716209 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-0"]
Nov 22 10:58:59 crc kubenswrapper[4926]: I1122 10:58:59.827285 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pdw7c\" (UniqueName: \"kubernetes.io/projected/cb0852ee-dc75-43ee-88ec-7343197eca5f-kube-api-access-pdw7c\") pod \"nova-cell0-conductor-0\" (UID: \"cb0852ee-dc75-43ee-88ec-7343197eca5f\") " pod="openstack/nova-cell0-conductor-0"
Nov 22 10:58:59 crc kubenswrapper[4926]: I1122 10:58:59.827390 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cb0852ee-dc75-43ee-88ec-7343197eca5f-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"cb0852ee-dc75-43ee-88ec-7343197eca5f\") " pod="openstack/nova-cell0-conductor-0"
Nov 22 10:58:59 crc kubenswrapper[4926]: I1122 10:58:59.827466 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cb0852ee-dc75-43ee-88ec-7343197eca5f-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"cb0852ee-dc75-43ee-88ec-7343197eca5f\") " pod="openstack/nova-cell0-conductor-0"
Nov 22 10:58:59 crc kubenswrapper[4926]: I1122 10:58:59.928976 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cb0852ee-dc75-43ee-88ec-7343197eca5f-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"cb0852ee-dc75-43ee-88ec-7343197eca5f\") " pod="openstack/nova-cell0-conductor-0"
Nov 22 10:58:59 crc kubenswrapper[4926]: I1122 10:58:59.929129 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pdw7c\" (UniqueName: \"kubernetes.io/projected/cb0852ee-dc75-43ee-88ec-7343197eca5f-kube-api-access-pdw7c\") pod \"nova-cell0-conductor-0\" (UID: \"cb0852ee-dc75-43ee-88ec-7343197eca5f\") " pod="openstack/nova-cell0-conductor-0"
Nov 22 10:58:59 crc kubenswrapper[4926]: I1122 10:58:59.929203 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cb0852ee-dc75-43ee-88ec-7343197eca5f-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"cb0852ee-dc75-43ee-88ec-7343197eca5f\") " pod="openstack/nova-cell0-conductor-0"
Nov 22 10:58:59 crc kubenswrapper[4926]: I1122 10:58:59.938421 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cb0852ee-dc75-43ee-88ec-7343197eca5f-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"cb0852ee-dc75-43ee-88ec-7343197eca5f\") " pod="openstack/nova-cell0-conductor-0"
Nov 22 10:58:59 crc kubenswrapper[4926]: I1122 10:58:59.940064 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cb0852ee-dc75-43ee-88ec-7343197eca5f-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"cb0852ee-dc75-43ee-88ec-7343197eca5f\") " pod="openstack/nova-cell0-conductor-0"
Nov 22 10:58:59 crc kubenswrapper[4926]: I1122 10:58:59.969024 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pdw7c\" (UniqueName: \"kubernetes.io/projected/cb0852ee-dc75-43ee-88ec-7343197eca5f-kube-api-access-pdw7c\") pod \"nova-cell0-conductor-0\" (UID: \"cb0852ee-dc75-43ee-88ec-7343197eca5f\") " pod="openstack/nova-cell0-conductor-0"
Nov 22 10:59:00 crc kubenswrapper[4926]: I1122 10:59:00.024974 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-0"
Nov 22 10:59:00 crc kubenswrapper[4926]: I1122 10:59:00.484883 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-0"]
Nov 22 10:59:00 crc kubenswrapper[4926]: I1122 10:59:00.629078 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"cb0852ee-dc75-43ee-88ec-7343197eca5f","Type":"ContainerStarted","Data":"ae72a0c2e14c8e324dd85b24fd180f6439f28cf88e13dd43726f35d983f3dd7a"}
Nov 22 10:59:01 crc kubenswrapper[4926]: I1122 10:59:01.645420 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"cb0852ee-dc75-43ee-88ec-7343197eca5f","Type":"ContainerStarted","Data":"f2106c92d42b1cd6c537d370decd5e6a647710772c01d0f0223629ab0fe15321"}
Nov 22 10:59:01 crc kubenswrapper[4926]: I1122 10:59:01.645574 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell0-conductor-0"
Nov 22 10:59:01 crc kubenswrapper[4926]: I1122 10:59:01.668173 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-conductor-0" podStartSLOduration=2.668154564 podStartE2EDuration="2.668154564s" podCreationTimestamp="2025-11-22 10:58:59 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 10:59:01.667256088 +0000 UTC m=+1161.968861395" watchObservedRunningTime="2025-11-22 10:59:01.668154564 +0000 UTC m=+1161.969759861"
Nov 22 10:59:05 crc kubenswrapper[4926]: I1122 10:59:05.072414 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell0-conductor-0"
Nov 22 10:59:05 crc kubenswrapper[4926]: I1122 10:59:05.675972 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-cell-mapping-kwhs5"]
Nov 22 10:59:05 crc kubenswrapper[4926]: I1122 10:59:05.677447 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-cell-mapping-kwhs5"
Nov 22 10:59:05 crc kubenswrapper[4926]: I1122 10:59:05.679962 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-manage-scripts"
Nov 22 10:59:05 crc kubenswrapper[4926]: I1122 10:59:05.680153 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-manage-config-data"
Nov 22 10:59:05 crc kubenswrapper[4926]: I1122 10:59:05.694830 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-cell-mapping-kwhs5"]
Nov 22 10:59:05 crc kubenswrapper[4926]: I1122 10:59:05.846828 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"]
Nov 22 10:59:05 crc kubenswrapper[4926]: I1122 10:59:05.848438 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0"
Nov 22 10:59:05 crc kubenswrapper[4926]: I1122 10:59:05.851594 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data"
Nov 22 10:59:05 crc kubenswrapper[4926]: I1122 10:59:05.862461 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"]
Nov 22 10:59:05 crc kubenswrapper[4926]: I1122 10:59:05.880265 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8b805a48-aee9-4243-b9eb-32bb0f948260-config-data\") pod \"nova-api-0\" (UID: \"8b805a48-aee9-4243-b9eb-32bb0f948260\") " pod="openstack/nova-api-0"
Nov 22 10:59:05 crc kubenswrapper[4926]: I1122 10:59:05.888222 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8b805a48-aee9-4243-b9eb-32bb0f948260-logs\") pod \"nova-api-0\" (UID: \"8b805a48-aee9-4243-b9eb-32bb0f948260\") " pod="openstack/nova-api-0"
Nov 22 10:59:05 crc kubenswrapper[4926]: I1122 10:59:05.888561 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8b805a48-aee9-4243-b9eb-32bb0f948260-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"8b805a48-aee9-4243-b9eb-32bb0f948260\") " pod="openstack/nova-api-0"
Nov 22 10:59:05 crc kubenswrapper[4926]: I1122 10:59:05.888768 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fxhz7\" (UniqueName: \"kubernetes.io/projected/85f46bca-0fff-445a-9c52-c0f4478105ff-kube-api-access-fxhz7\") pod \"nova-cell0-cell-mapping-kwhs5\" (UID: \"85f46bca-0fff-445a-9c52-c0f4478105ff\") " pod="openstack/nova-cell0-cell-mapping-kwhs5"
Nov 22 10:59:05 crc kubenswrapper[4926]: I1122 10:59:05.888937 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q76sl\" (UniqueName: \"kubernetes.io/projected/8b805a48-aee9-4243-b9eb-32bb0f948260-kube-api-access-q76sl\") pod \"nova-api-0\" (UID: \"8b805a48-aee9-4243-b9eb-32bb0f948260\") " pod="openstack/nova-api-0"
Nov 22 10:59:05 crc kubenswrapper[4926]: I1122 10:59:05.889088 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/85f46bca-0fff-445a-9c52-c0f4478105ff-config-data\") pod \"nova-cell0-cell-mapping-kwhs5\" (UID: \"85f46bca-0fff-445a-9c52-c0f4478105ff\") " pod="openstack/nova-cell0-cell-mapping-kwhs5"
Nov 22 10:59:05 crc kubenswrapper[4926]: I1122 10:59:05.889238 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/85f46bca-0fff-445a-9c52-c0f4478105ff-scripts\") pod \"nova-cell0-cell-mapping-kwhs5\" (UID: \"85f46bca-0fff-445a-9c52-c0f4478105ff\") " pod="openstack/nova-cell0-cell-mapping-kwhs5"
Nov 22 10:59:05 crc kubenswrapper[4926]: I1122 10:59:05.889362 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/85f46bca-0fff-445a-9c52-c0f4478105ff-combined-ca-bundle\") pod \"nova-cell0-cell-mapping-kwhs5\" (UID: \"85f46bca-0fff-445a-9c52-c0f4478105ff\") " pod="openstack/nova-cell0-cell-mapping-kwhs5"
Nov 22 10:59:05 crc kubenswrapper[4926]: I1122 10:59:05.894441 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-novncproxy-0"]
Nov 22 10:59:05 crc kubenswrapper[4926]: I1122 10:59:05.895960 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0"
Nov 22 10:59:05 crc kubenswrapper[4926]: I1122 10:59:05.899394 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-novncproxy-config-data"
Nov 22 10:59:05 crc kubenswrapper[4926]: I1122 10:59:05.901110 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"]
Nov 22 10:59:05 crc kubenswrapper[4926]: I1122 10:59:05.990400 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fxhz7\" (UniqueName: \"kubernetes.io/projected/85f46bca-0fff-445a-9c52-c0f4478105ff-kube-api-access-fxhz7\") pod \"nova-cell0-cell-mapping-kwhs5\" (UID: \"85f46bca-0fff-445a-9c52-c0f4478105ff\") " pod="openstack/nova-cell0-cell-mapping-kwhs5"
Nov 22 10:59:05 crc kubenswrapper[4926]: I1122 10:59:05.990459 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q76sl\" (UniqueName: \"kubernetes.io/projected/8b805a48-aee9-4243-b9eb-32bb0f948260-kube-api-access-q76sl\") pod \"nova-api-0\" (UID: \"8b805a48-aee9-4243-b9eb-32bb0f948260\") " pod="openstack/nova-api-0"
Nov 22 10:59:05 crc kubenswrapper[4926]: I1122 10:59:05.990495 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/85f46bca-0fff-445a-9c52-c0f4478105ff-config-data\") pod \"nova-cell0-cell-mapping-kwhs5\" (UID: \"85f46bca-0fff-445a-9c52-c0f4478105ff\") " pod="openstack/nova-cell0-cell-mapping-kwhs5"
Nov 22 10:59:05 crc kubenswrapper[4926]: I1122 10:59:05.990516 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1ad9946b-4e9e-4366-9a71-da0887c1b083-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"1ad9946b-4e9e-4366-9a71-da0887c1b083\") " pod="openstack/nova-cell1-novncproxy-0"
Nov 22 10:59:05 crc kubenswrapper[4926]: I1122 10:59:05.990564 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/85f46bca-0fff-445a-9c52-c0f4478105ff-scripts\") pod \"nova-cell0-cell-mapping-kwhs5\" (UID: \"85f46bca-0fff-445a-9c52-c0f4478105ff\") " pod="openstack/nova-cell0-cell-mapping-kwhs5"
Nov 22 10:59:05 crc kubenswrapper[4926]: I1122 10:59:05.990586 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1ad9946b-4e9e-4366-9a71-da0887c1b083-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"1ad9946b-4e9e-4366-9a71-da0887c1b083\") " pod="openstack/nova-cell1-novncproxy-0"
Nov 22 10:59:05 crc kubenswrapper[4926]: I1122 10:59:05.990608 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/85f46bca-0fff-445a-9c52-c0f4478105ff-combined-ca-bundle\") pod \"nova-cell0-cell-mapping-kwhs5\" (UID: \"85f46bca-0fff-445a-9c52-c0f4478105ff\") " pod="openstack/nova-cell0-cell-mapping-kwhs5"
Nov 22 10:59:05 crc kubenswrapper[4926]: I1122 10:59:05.990629 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8b805a48-aee9-4243-b9eb-32bb0f948260-config-data\") pod \"nova-api-0\" (UID: \"8b805a48-aee9-4243-b9eb-32bb0f948260\") " pod="openstack/nova-api-0"
Nov 22 10:59:05 crc kubenswrapper[4926]: I1122 10:59:05.990658 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2tmgs\" (UniqueName: \"kubernetes.io/projected/1ad9946b-4e9e-4366-9a71-da0887c1b083-kube-api-access-2tmgs\") pod \"nova-cell1-novncproxy-0\" (UID: \"1ad9946b-4e9e-4366-9a71-da0887c1b083\") " pod="openstack/nova-cell1-novncproxy-0"
Nov 22 10:59:05 crc kubenswrapper[4926]: I1122 10:59:05.990701 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8b805a48-aee9-4243-b9eb-32bb0f948260-logs\") pod \"nova-api-0\" (UID: \"8b805a48-aee9-4243-b9eb-32bb0f948260\") " pod="openstack/nova-api-0"
Nov 22 10:59:05 crc kubenswrapper[4926]: I1122 10:59:05.990744 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8b805a48-aee9-4243-b9eb-32bb0f948260-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"8b805a48-aee9-4243-b9eb-32bb0f948260\") " pod="openstack/nova-api-0"
Nov 22 10:59:05 crc kubenswrapper[4926]: I1122 10:59:05.991428 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8b805a48-aee9-4243-b9eb-32bb0f948260-logs\") pod \"nova-api-0\" (UID: \"8b805a48-aee9-4243-b9eb-32bb0f948260\") " pod="openstack/nova-api-0"
Nov 22 10:59:05 crc kubenswrapper[4926]: I1122 10:59:05.999108 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8b805a48-aee9-4243-b9eb-32bb0f948260-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"8b805a48-aee9-4243-b9eb-32bb0f948260\") " pod="openstack/nova-api-0"
Nov 22 10:59:05 crc kubenswrapper[4926]: I1122 10:59:05.999112 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/85f46bca-0fff-445a-9c52-c0f4478105ff-combined-ca-bundle\") pod \"nova-cell0-cell-mapping-kwhs5\" (UID: \"85f46bca-0fff-445a-9c52-c0f4478105ff\") " pod="openstack/nova-cell0-cell-mapping-kwhs5"
Nov 22 10:59:06 crc kubenswrapper[4926]: I1122 10:59:06.002032 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8b805a48-aee9-4243-b9eb-32bb0f948260-config-data\") pod \"nova-api-0\" (UID: \"8b805a48-aee9-4243-b9eb-32bb0f948260\") " pod="openstack/nova-api-0"
Nov 22 10:59:06 crc kubenswrapper[4926]: I1122 10:59:06.002475 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/85f46bca-0fff-445a-9c52-c0f4478105ff-scripts\") pod \"nova-cell0-cell-mapping-kwhs5\" (UID: \"85f46bca-0fff-445a-9c52-c0f4478105ff\") " pod="openstack/nova-cell0-cell-mapping-kwhs5"
Nov 22 10:59:06 crc kubenswrapper[4926]: I1122 10:59:06.002742 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/85f46bca-0fff-445a-9c52-c0f4478105ff-config-data\") pod \"nova-cell0-cell-mapping-kwhs5\" (UID: \"85f46bca-0fff-445a-9c52-c0f4478105ff\") " pod="openstack/nova-cell0-cell-mapping-kwhs5"
Nov 22 10:59:06 crc kubenswrapper[4926]: I1122 10:59:06.012783 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fxhz7\" (UniqueName: \"kubernetes.io/projected/85f46bca-0fff-445a-9c52-c0f4478105ff-kube-api-access-fxhz7\") pod \"nova-cell0-cell-mapping-kwhs5\" (UID: \"85f46bca-0fff-445a-9c52-c0f4478105ff\") " pod="openstack/nova-cell0-cell-mapping-kwhs5"
Nov 22 10:59:06 crc kubenswrapper[4926]: I1122 10:59:06.013319 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-q76sl\" (UniqueName: \"kubernetes.io/projected/8b805a48-aee9-4243-b9eb-32bb0f948260-kube-api-access-q76sl\") pod \"nova-api-0\" (UID: \"8b805a48-aee9-4243-b9eb-32bb0f948260\") " pod="openstack/nova-api-0"
Nov 22 10:59:06 crc kubenswrapper[4926]: I1122 10:59:06.014586 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-cell-mapping-kwhs5"
Nov 22 10:59:06 crc kubenswrapper[4926]: I1122 10:59:06.075741 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"]
Nov 22 10:59:06 crc kubenswrapper[4926]: I1122 10:59:06.077266 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0"
Nov 22 10:59:06 crc kubenswrapper[4926]: I1122 10:59:06.080354 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data"
Nov 22 10:59:06 crc kubenswrapper[4926]: I1122 10:59:06.092846 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1ad9946b-4e9e-4366-9a71-da0887c1b083-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"1ad9946b-4e9e-4366-9a71-da0887c1b083\") " pod="openstack/nova-cell1-novncproxy-0"
Nov 22 10:59:06 crc kubenswrapper[4926]: I1122 10:59:06.092922 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1ad9946b-4e9e-4366-9a71-da0887c1b083-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"1ad9946b-4e9e-4366-9a71-da0887c1b083\") " pod="openstack/nova-cell1-novncproxy-0"
Nov 22 10:59:06 crc kubenswrapper[4926]: I1122 10:59:06.092958 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2tmgs\" (UniqueName: \"kubernetes.io/projected/1ad9946b-4e9e-4366-9a71-da0887c1b083-kube-api-access-2tmgs\") pod \"nova-cell1-novncproxy-0\" (UID: \"1ad9946b-4e9e-4366-9a71-da0887c1b083\") " pod="openstack/nova-cell1-novncproxy-0"
Nov 22 10:59:06 crc kubenswrapper[4926]: I1122 10:59:06.092989 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/05b9b91d-44f6-47d0-b509-eb056c022a0d-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"05b9b91d-44f6-47d0-b509-eb056c022a0d\") " pod="openstack/nova-scheduler-0"
Nov 22 10:59:06 crc kubenswrapper[4926]: I1122 10:59:06.093010 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/05b9b91d-44f6-47d0-b509-eb056c022a0d-config-data\") pod \"nova-scheduler-0\" (UID: \"05b9b91d-44f6-47d0-b509-eb056c022a0d\") " pod="openstack/nova-scheduler-0"
Nov 22 10:59:06 crc kubenswrapper[4926]: I1122 10:59:06.093074 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vc4cp\" (UniqueName: \"kubernetes.io/projected/05b9b91d-44f6-47d0-b509-eb056c022a0d-kube-api-access-vc4cp\") pod \"nova-scheduler-0\" (UID: \"05b9b91d-44f6-47d0-b509-eb056c022a0d\") " pod="openstack/nova-scheduler-0"
Nov 22 10:59:06 crc kubenswrapper[4926]: I1122 10:59:06.101777 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1ad9946b-4e9e-4366-9a71-da0887c1b083-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"1ad9946b-4e9e-4366-9a71-da0887c1b083\") " pod="openstack/nova-cell1-novncproxy-0"
Nov 22 10:59:06 crc kubenswrapper[4926]: I1122 10:59:06.108048 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1ad9946b-4e9e-4366-9a71-da0887c1b083-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"1ad9946b-4e9e-4366-9a71-da0887c1b083\") " pod="openstack/nova-cell1-novncproxy-0"
Nov 22 10:59:06 crc kubenswrapper[4926]: I1122 10:59:06.141544 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2tmgs\" (UniqueName: \"kubernetes.io/projected/1ad9946b-4e9e-4366-9a71-da0887c1b083-kube-api-access-2tmgs\") pod \"nova-cell1-novncproxy-0\" (UID: \"1ad9946b-4e9e-4366-9a71-da0887c1b083\") " pod="openstack/nova-cell1-novncproxy-0"
Nov 22 10:59:06 crc kubenswrapper[4926]: I1122 10:59:06.171626 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"]
Nov 22 10:59:06 crc kubenswrapper[4926]: I1122 10:59:06.174880 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0"
Nov 22 10:59:06 crc kubenswrapper[4926]: I1122 10:59:06.179297 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0"
Nov 22 10:59:06 crc kubenswrapper[4926]: I1122 10:59:06.190156 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data"
Nov 22 10:59:06 crc kubenswrapper[4926]: I1122 10:59:06.197477 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e65e0256-cba0-499f-bea4-9f87cac3ffd4-config-data\") pod \"nova-metadata-0\" (UID: \"e65e0256-cba0-499f-bea4-9f87cac3ffd4\") " pod="openstack/nova-metadata-0"
Nov 22 10:59:06 crc kubenswrapper[4926]: I1122 10:59:06.197632 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/05b9b91d-44f6-47d0-b509-eb056c022a0d-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"05b9b91d-44f6-47d0-b509-eb056c022a0d\") " pod="openstack/nova-scheduler-0"
Nov 22 10:59:06 crc kubenswrapper[4926]: I1122 10:59:06.197659 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pstgt\" (UniqueName: \"kubernetes.io/projected/e65e0256-cba0-499f-bea4-9f87cac3ffd4-kube-api-access-pstgt\") pod \"nova-metadata-0\" (UID: \"e65e0256-cba0-499f-bea4-9f87cac3ffd4\") " pod="openstack/nova-metadata-0"
Nov 22 10:59:06 crc kubenswrapper[4926]: I1122 10:59:06.197680 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/05b9b91d-44f6-47d0-b509-eb056c022a0d-config-data\") pod \"nova-scheduler-0\" (UID: \"05b9b91d-44f6-47d0-b509-eb056c022a0d\") " pod="openstack/nova-scheduler-0"
Nov 22 10:59:06 crc kubenswrapper[4926]: I1122 10:59:06.197769 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vc4cp\" (UniqueName: \"kubernetes.io/projected/05b9b91d-44f6-47d0-b509-eb056c022a0d-kube-api-access-vc4cp\") pod \"nova-scheduler-0\" (UID: \"05b9b91d-44f6-47d0-b509-eb056c022a0d\") " pod="openstack/nova-scheduler-0"
Nov 22 10:59:06 crc kubenswrapper[4926]: I1122 10:59:06.197801 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e65e0256-cba0-499f-bea4-9f87cac3ffd4-logs\") pod \"nova-metadata-0\" (UID: \"e65e0256-cba0-499f-bea4-9f87cac3ffd4\") " pod="openstack/nova-metadata-0"
Nov 22 10:59:06 crc kubenswrapper[4926]: I1122 10:59:06.197846 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e65e0256-cba0-499f-bea4-9f87cac3ffd4-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"e65e0256-cba0-499f-bea4-9f87cac3ffd4\") " pod="openstack/nova-metadata-0"
Nov 22 10:59:06 crc kubenswrapper[4926]: I1122 10:59:06.213673 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/05b9b91d-44f6-47d0-b509-eb056c022a0d-config-data\") pod \"nova-scheduler-0\" (UID: \"05b9b91d-44f6-47d0-b509-eb056c022a0d\") " pod="openstack/nova-scheduler-0"
Nov 22 10:59:06 crc kubenswrapper[4926]: I1122 10:59:06.222407 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0"
Nov 22 10:59:06 crc kubenswrapper[4926]: I1122 10:59:06.227030 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/05b9b91d-44f6-47d0-b509-eb056c022a0d-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"05b9b91d-44f6-47d0-b509-eb056c022a0d\") " pod="openstack/nova-scheduler-0"
Nov 22 10:59:06 crc kubenswrapper[4926]: I1122 10:59:06.244978 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vc4cp\" (UniqueName: \"kubernetes.io/projected/05b9b91d-44f6-47d0-b509-eb056c022a0d-kube-api-access-vc4cp\") pod \"nova-scheduler-0\" (UID: \"05b9b91d-44f6-47d0-b509-eb056c022a0d\") " pod="openstack/nova-scheduler-0"
Nov 22 10:59:06 crc kubenswrapper[4926]: I1122 10:59:06.248816 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"]
Nov 22 10:59:06 crc kubenswrapper[4926]: I1122 10:59:06.259823 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"]
Nov 22 10:59:06 crc kubenswrapper[4926]: I1122 10:59:06.277206 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-865f5d856f-l7kzb"]
Nov 22 10:59:06 crc kubenswrapper[4926]: I1122 10:59:06.279831 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-865f5d856f-l7kzb"
Nov 22 10:59:06 crc kubenswrapper[4926]: I1122 10:59:06.295749 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-865f5d856f-l7kzb"]
Nov 22 10:59:06 crc kubenswrapper[4926]: I1122 10:59:06.301091 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/c1058caf-33f4-4f00-bb1f-fe789d442b8d-ovsdbserver-nb\") pod \"dnsmasq-dns-865f5d856f-l7kzb\" (UID: \"c1058caf-33f4-4f00-bb1f-fe789d442b8d\") " pod="openstack/dnsmasq-dns-865f5d856f-l7kzb"
Nov 22 10:59:06 crc kubenswrapper[4926]: I1122 10:59:06.301170 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pstgt\" (UniqueName: \"kubernetes.io/projected/e65e0256-cba0-499f-bea4-9f87cac3ffd4-kube-api-access-pstgt\") pod \"nova-metadata-0\" (UID: \"e65e0256-cba0-499f-bea4-9f87cac3ffd4\") " pod="openstack/nova-metadata-0"
Nov 22 10:59:06 crc kubenswrapper[4926]: I1122 10:59:06.301204 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c1058caf-33f4-4f00-bb1f-fe789d442b8d-config\") pod \"dnsmasq-dns-865f5d856f-l7kzb\" (UID: \"c1058caf-33f4-4f00-bb1f-fe789d442b8d\") " pod="openstack/dnsmasq-dns-865f5d856f-l7kzb"
Nov 22 10:59:06 crc kubenswrapper[4926]: I1122 10:59:06.301250 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e65e0256-cba0-499f-bea4-9f87cac3ffd4-logs\") pod \"nova-metadata-0\" (UID: \"e65e0256-cba0-499f-bea4-9f87cac3ffd4\") " pod="openstack/nova-metadata-0"
Nov 22 10:59:06 crc kubenswrapper[4926]: I1122 10:59:06.301274 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/c1058caf-33f4-4f00-bb1f-fe789d442b8d-dns-swift-storage-0\") pod \"dnsmasq-dns-865f5d856f-l7kzb\" (UID: \"c1058caf-33f4-4f00-bb1f-fe789d442b8d\") " pod="openstack/dnsmasq-dns-865f5d856f-l7kzb"
Nov 22 10:59:06 crc kubenswrapper[4926]: I1122 10:59:06.301298 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/c1058caf-33f4-4f00-bb1f-fe789d442b8d-ovsdbserver-sb\") pod \"dnsmasq-dns-865f5d856f-l7kzb\" (UID: \"c1058caf-33f4-4f00-bb1f-fe789d442b8d\") " pod="openstack/dnsmasq-dns-865f5d856f-l7kzb"
Nov 22 10:59:06 crc kubenswrapper[4926]: I1122 10:59:06.301315 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e65e0256-cba0-499f-bea4-9f87cac3ffd4-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"e65e0256-cba0-499f-bea4-9f87cac3ffd4\") " pod="openstack/nova-metadata-0"
Nov 22 10:59:06 crc kubenswrapper[4926]: I1122 10:59:06.301345 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g4pqd\" (UniqueName: \"kubernetes.io/projected/c1058caf-33f4-4f00-bb1f-fe789d442b8d-kube-api-access-g4pqd\") pod \"dnsmasq-dns-865f5d856f-l7kzb\" (UID: \"c1058caf-33f4-4f00-bb1f-fe789d442b8d\") " pod="openstack/dnsmasq-dns-865f5d856f-l7kzb"
Nov 22 10:59:06 crc kubenswrapper[4926]: I1122 10:59:06.301367 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e65e0256-cba0-499f-bea4-9f87cac3ffd4-config-data\") pod \"nova-metadata-0\" (UID: \"e65e0256-cba0-499f-bea4-9f87cac3ffd4\") " pod="openstack/nova-metadata-0"
Nov 22 10:59:06 crc kubenswrapper[4926]: I1122 10:59:06.301433 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/c1058caf-33f4-4f00-bb1f-fe789d442b8d-dns-svc\") pod \"dnsmasq-dns-865f5d856f-l7kzb\" (UID: \"c1058caf-33f4-4f00-bb1f-fe789d442b8d\") " pod="openstack/dnsmasq-dns-865f5d856f-l7kzb"
Nov 22 10:59:06 crc kubenswrapper[4926]: I1122 10:59:06.302128 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e65e0256-cba0-499f-bea4-9f87cac3ffd4-logs\") pod \"nova-metadata-0\" (UID: \"e65e0256-cba0-499f-bea4-9f87cac3ffd4\") " pod="openstack/nova-metadata-0"
Nov 22 10:59:06 crc kubenswrapper[4926]: I1122 10:59:06.309482 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e65e0256-cba0-499f-bea4-9f87cac3ffd4-config-data\") pod \"nova-metadata-0\" (UID: \"e65e0256-cba0-499f-bea4-9f87cac3ffd4\") " pod="openstack/nova-metadata-0"
Nov 22 10:59:06 crc kubenswrapper[4926]: I1122 10:59:06.311839 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e65e0256-cba0-499f-bea4-9f87cac3ffd4-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"e65e0256-cba0-499f-bea4-9f87cac3ffd4\") " pod="openstack/nova-metadata-0"
Nov 22 10:59:06 crc kubenswrapper[4926]: I1122 10:59:06.318350 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pstgt\" (UniqueName: \"kubernetes.io/projected/e65e0256-cba0-499f-bea4-9f87cac3ffd4-kube-api-access-pstgt\") pod \"nova-metadata-0\" (UID: \"e65e0256-cba0-499f-bea4-9f87cac3ffd4\") " pod="openstack/nova-metadata-0"
Nov 22 10:59:06 crc kubenswrapper[4926]: I1122 10:59:06.319249 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0"
Nov 22 10:59:06 crc kubenswrapper[4926]: I1122 10:59:06.329363 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0"
Nov 22 10:59:06 crc kubenswrapper[4926]: I1122 10:59:06.402044 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/c1058caf-33f4-4f00-bb1f-fe789d442b8d-ovsdbserver-nb\") pod \"dnsmasq-dns-865f5d856f-l7kzb\" (UID: \"c1058caf-33f4-4f00-bb1f-fe789d442b8d\") " pod="openstack/dnsmasq-dns-865f5d856f-l7kzb"
Nov 22 10:59:06 crc kubenswrapper[4926]: I1122 10:59:06.402376 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c1058caf-33f4-4f00-bb1f-fe789d442b8d-config\") pod \"dnsmasq-dns-865f5d856f-l7kzb\" (UID: \"c1058caf-33f4-4f00-bb1f-fe789d442b8d\") " pod="openstack/dnsmasq-dns-865f5d856f-l7kzb"
Nov 22 10:59:06 crc kubenswrapper[4926]: I1122 10:59:06.402433 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/c1058caf-33f4-4f00-bb1f-fe789d442b8d-dns-swift-storage-0\") pod \"dnsmasq-dns-865f5d856f-l7kzb\" (UID: \"c1058caf-33f4-4f00-bb1f-fe789d442b8d\") " pod="openstack/dnsmasq-dns-865f5d856f-l7kzb"
Nov 22 10:59:06 crc kubenswrapper[4926]: I1122 10:59:06.402459 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/c1058caf-33f4-4f00-bb1f-fe789d442b8d-ovsdbserver-sb\") pod \"dnsmasq-dns-865f5d856f-l7kzb\" (UID: \"c1058caf-33f4-4f00-bb1f-fe789d442b8d\") " pod="openstack/dnsmasq-dns-865f5d856f-l7kzb"
Nov 22 10:59:06 crc kubenswrapper[4926]: I1122 10:59:06.402495 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g4pqd\" (UniqueName: \"kubernetes.io/projected/c1058caf-33f4-4f00-bb1f-fe789d442b8d-kube-api-access-g4pqd\") pod \"dnsmasq-dns-865f5d856f-l7kzb\" (UID: \"c1058caf-33f4-4f00-bb1f-fe789d442b8d\") " pod="openstack/dnsmasq-dns-865f5d856f-l7kzb"
Nov 22 10:59:06 crc kubenswrapper[4926]: I1122 10:59:06.402560 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/c1058caf-33f4-4f00-bb1f-fe789d442b8d-dns-svc\") pod \"dnsmasq-dns-865f5d856f-l7kzb\" (UID: \"c1058caf-33f4-4f00-bb1f-fe789d442b8d\") " pod="openstack/dnsmasq-dns-865f5d856f-l7kzb"
Nov 22 10:59:06 crc kubenswrapper[4926]: I1122 10:59:06.403278 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/c1058caf-33f4-4f00-bb1f-fe789d442b8d-dns-svc\") pod \"dnsmasq-dns-865f5d856f-l7kzb\" (UID: \"c1058caf-33f4-4f00-bb1f-fe789d442b8d\") " pod="openstack/dnsmasq-dns-865f5d856f-l7kzb"
Nov 22 10:59:06 crc kubenswrapper[4926]: I1122 10:59:06.403300 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/c1058caf-33f4-4f00-bb1f-fe789d442b8d-ovsdbserver-nb\") pod \"dnsmasq-dns-865f5d856f-l7kzb\" (UID: \"c1058caf-33f4-4f00-bb1f-fe789d442b8d\") " pod="openstack/dnsmasq-dns-865f5d856f-l7kzb"
Nov 22 10:59:06 crc kubenswrapper[4926]: I1122 10:59:06.403830 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/c1058caf-33f4-4f00-bb1f-fe789d442b8d-dns-swift-storage-0\") pod \"dnsmasq-dns-865f5d856f-l7kzb\" (UID: \"c1058caf-33f4-4f00-bb1f-fe789d442b8d\") " pod="openstack/dnsmasq-dns-865f5d856f-l7kzb"
Nov 22 10:59:06 crc kubenswrapper[4926]: I1122 10:59:06.404401 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/c1058caf-33f4-4f00-bb1f-fe789d442b8d-ovsdbserver-sb\") pod \"dnsmasq-dns-865f5d856f-l7kzb\" (UID: \"c1058caf-33f4-4f00-bb1f-fe789d442b8d\") " pod="openstack/dnsmasq-dns-865f5d856f-l7kzb"
Nov 22 10:59:06 crc kubenswrapper[4926]: I1122 10:59:06.404951 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c1058caf-33f4-4f00-bb1f-fe789d442b8d-config\") pod \"dnsmasq-dns-865f5d856f-l7kzb\" (UID: \"c1058caf-33f4-4f00-bb1f-fe789d442b8d\") " pod="openstack/dnsmasq-dns-865f5d856f-l7kzb"
Nov 22 10:59:06 crc kubenswrapper[4926]: I1122 10:59:06.422859 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g4pqd\" (UniqueName: \"kubernetes.io/projected/c1058caf-33f4-4f00-bb1f-fe789d442b8d-kube-api-access-g4pqd\") pod \"dnsmasq-dns-865f5d856f-l7kzb\" (UID: \"c1058caf-33f4-4f00-bb1f-fe789d442b8d\") " pod="openstack/dnsmasq-dns-865f5d856f-l7kzb"
Nov 22 10:59:06 crc kubenswrapper[4926]: I1122 10:59:06.638527 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-865f5d856f-l7kzb"
Nov 22 10:59:06 crc kubenswrapper[4926]: I1122 10:59:06.675529 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-cell-mapping-kwhs5"]
Nov 22 10:59:06 crc kubenswrapper[4926]: I1122 10:59:06.686994 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-conductor-db-sync-vwr2z"]
Nov 22 10:59:06 crc kubenswrapper[4926]: I1122 10:59:06.688939 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-vwr2z"
Nov 22 10:59:06 crc kubenswrapper[4926]: I1122 10:59:06.692099 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-scripts"
Nov 22 10:59:06 crc kubenswrapper[4926]: I1122 10:59:06.692101 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-config-data"
Nov 22 10:59:06 crc kubenswrapper[4926]: I1122 10:59:06.709140 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-kwhs5" event={"ID":"85f46bca-0fff-445a-9c52-c0f4478105ff","Type":"ContainerStarted","Data":"6fb63d1430992c076e783b0037cd367f2548ec1af63a474337ae7bdbb9c06107"}
Nov 22 10:59:06 crc kubenswrapper[4926]: I1122 10:59:06.709996 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/89f5e6c8-c807-4ec1-80d1-7b0ac0192d38-combined-ca-bundle\") pod \"nova-cell1-conductor-db-sync-vwr2z\" (UID: \"89f5e6c8-c807-4ec1-80d1-7b0ac0192d38\") " pod="openstack/nova-cell1-conductor-db-sync-vwr2z"
Nov 22 10:59:06 crc kubenswrapper[4926]: I1122 10:59:06.710039 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/89f5e6c8-c807-4ec1-80d1-7b0ac0192d38-scripts\") pod \"nova-cell1-conductor-db-sync-vwr2z\" (UID: \"89f5e6c8-c807-4ec1-80d1-7b0ac0192d38\") " pod="openstack/nova-cell1-conductor-db-sync-vwr2z"
Nov 22 10:59:06 crc kubenswrapper[4926]: I1122 10:59:06.710098 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q7gqc\" (UniqueName: \"kubernetes.io/projected/89f5e6c8-c807-4ec1-80d1-7b0ac0192d38-kube-api-access-q7gqc\") pod \"nova-cell1-conductor-db-sync-vwr2z\" (UID: \"89f5e6c8-c807-4ec1-80d1-7b0ac0192d38\") " pod="openstack/nova-cell1-conductor-db-sync-vwr2z"
Nov 22 10:59:06 crc kubenswrapper[4926]: I1122 10:59:06.710165 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/89f5e6c8-c807-4ec1-80d1-7b0ac0192d38-config-data\") pod \"nova-cell1-conductor-db-sync-vwr2z\" (UID: \"89f5e6c8-c807-4ec1-80d1-7b0ac0192d38\") " pod="openstack/nova-cell1-conductor-db-sync-vwr2z"
Nov 22 10:59:06 crc kubenswrapper[4926]: I1122 10:59:06.712780 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-vwr2z"]
Nov 22 10:59:06 crc kubenswrapper[4926]: I1122 10:59:06.812366 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q7gqc\" (UniqueName: \"kubernetes.io/projected/89f5e6c8-c807-4ec1-80d1-7b0ac0192d38-kube-api-access-q7gqc\") pod \"nova-cell1-conductor-db-sync-vwr2z\" (UID: \"89f5e6c8-c807-4ec1-80d1-7b0ac0192d38\") " pod="openstack/nova-cell1-conductor-db-sync-vwr2z"
Nov 22 10:59:06 crc kubenswrapper[4926]: I1122 10:59:06.812434 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/89f5e6c8-c807-4ec1-80d1-7b0ac0192d38-config-data\") pod \"nova-cell1-conductor-db-sync-vwr2z\" (UID: \"89f5e6c8-c807-4ec1-80d1-7b0ac0192d38\") " pod="openstack/nova-cell1-conductor-db-sync-vwr2z"
Nov 22 10:59:06 crc kubenswrapper[4926]: I1122 10:59:06.812551 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/89f5e6c8-c807-4ec1-80d1-7b0ac0192d38-combined-ca-bundle\") pod \"nova-cell1-conductor-db-sync-vwr2z\" (UID: \"89f5e6c8-c807-4ec1-80d1-7b0ac0192d38\") " pod="openstack/nova-cell1-conductor-db-sync-vwr2z"
Nov 22 10:59:06 crc kubenswrapper[4926]: I1122 10:59:06.812573 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/89f5e6c8-c807-4ec1-80d1-7b0ac0192d38-scripts\") pod \"nova-cell1-conductor-db-sync-vwr2z\" (UID: \"89f5e6c8-c807-4ec1-80d1-7b0ac0192d38\") " pod="openstack/nova-cell1-conductor-db-sync-vwr2z"
Nov 22 10:59:06 crc kubenswrapper[4926]: I1122 10:59:06.818384 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/89f5e6c8-c807-4ec1-80d1-7b0ac0192d38-scripts\") pod \"nova-cell1-conductor-db-sync-vwr2z\" (UID: \"89f5e6c8-c807-4ec1-80d1-7b0ac0192d38\") " pod="openstack/nova-cell1-conductor-db-sync-vwr2z"
Nov 22 10:59:06 crc kubenswrapper[4926]: I1122 10:59:06.818674 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/89f5e6c8-c807-4ec1-80d1-7b0ac0192d38-config-data\") pod \"nova-cell1-conductor-db-sync-vwr2z\" (UID: \"89f5e6c8-c807-4ec1-80d1-7b0ac0192d38\") " pod="openstack/nova-cell1-conductor-db-sync-vwr2z"
Nov 22 10:59:06 crc kubenswrapper[4926]: I1122 10:59:06.820279 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/89f5e6c8-c807-4ec1-80d1-7b0ac0192d38-combined-ca-bundle\") pod \"nova-cell1-conductor-db-sync-vwr2z\" (UID: \"89f5e6c8-c807-4ec1-80d1-7b0ac0192d38\") " pod="openstack/nova-cell1-conductor-db-sync-vwr2z"
Nov 22 10:59:06 crc kubenswrapper[4926]: I1122 10:59:06.837489 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"]
Nov 22 10:59:06 crc kubenswrapper[4926]: I1122 10:59:06.838092 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-q7gqc\" (UniqueName: \"kubernetes.io/projected/89f5e6c8-c807-4ec1-80d1-7b0ac0192d38-kube-api-access-q7gqc\") pod \"nova-cell1-conductor-db-sync-vwr2z\" (UID: \"89f5e6c8-c807-4ec1-80d1-7b0ac0192d38\") " pod="openstack/nova-cell1-conductor-db-sync-vwr2z"
Nov 22 10:59:06 crc kubenswrapper[4926]: W1122 10:59:06.845864 4926 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod8b805a48_aee9_4243_b9eb_32bb0f948260.slice/crio-7c9d599b6159c7fef83334f91a7ef154173a1f74f865cefeb45d3d857b0ec5b7 WatchSource:0}: Error finding container 7c9d599b6159c7fef83334f91a7ef154173a1f74f865cefeb45d3d857b0ec5b7: Status 404 returned error can't find the container with id 7c9d599b6159c7fef83334f91a7ef154173a1f74f865cefeb45d3d857b0ec5b7
Nov 22 10:59:06 crc kubenswrapper[4926]: I1122 10:59:06.949081 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"]
Nov 22 10:59:06 crc kubenswrapper[4926]: I1122 10:59:06.959481 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"]
Nov 22 10:59:06 crc kubenswrapper[4926]: I1122 10:59:06.975959 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"]
Nov 22 10:59:06 crc kubenswrapper[4926]: W1122 10:59:06.984208 4926 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod05b9b91d_44f6_47d0_b509_eb056c022a0d.slice/crio-44f98768d5e72b6253d3ba3602a4db3b40e099f729ba8079707c5e5e2c5f1bac WatchSource:0}: Error finding container 44f98768d5e72b6253d3ba3602a4db3b40e099f729ba8079707c5e5e2c5f1bac: Status 404 returned error can't find the container with id 44f98768d5e72b6253d3ba3602a4db3b40e099f729ba8079707c5e5e2c5f1bac
Nov 22 10:59:06 crc kubenswrapper[4926]: W1122 10:59:06.990747 4926 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode65e0256_cba0_499f_bea4_9f87cac3ffd4.slice/crio-1971c13ac23cdf85420c376305e5e7065a7c13de3ddc6cf3829b4c79bb6e5a9b WatchSource:0}: Error finding container 1971c13ac23cdf85420c376305e5e7065a7c13de3ddc6cf3829b4c79bb6e5a9b: Status 404 returned error can't find the container with id 1971c13ac23cdf85420c376305e5e7065a7c13de3ddc6cf3829b4c79bb6e5a9b
Nov 22 10:59:07 crc kubenswrapper[4926]: I1122 10:59:07.011293 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-vwr2z"
Nov 22 10:59:07 crc kubenswrapper[4926]: I1122 10:59:07.170411 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-865f5d856f-l7kzb"]
Nov 22 10:59:07 crc kubenswrapper[4926]: I1122 10:59:07.563170 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-vwr2z"]
Nov 22 10:59:07 crc kubenswrapper[4926]: W1122 10:59:07.572290 4926 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod89f5e6c8_c807_4ec1_80d1_7b0ac0192d38.slice/crio-373e24673adffdbfb9d486d2999f332cc654521afbd635d78dae5707dafefcaf WatchSource:0}: Error finding container 373e24673adffdbfb9d486d2999f332cc654521afbd635d78dae5707dafefcaf: Status 404 returned error can't find the container with id 373e24673adffdbfb9d486d2999f332cc654521afbd635d78dae5707dafefcaf
Nov 22 10:59:07 crc kubenswrapper[4926]: I1122 10:59:07.722403 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"1ad9946b-4e9e-4366-9a71-da0887c1b083","Type":"ContainerStarted","Data":"b2c41d56a0232d6d8c14854cefdbb063e64ae73b234d16ee7db4202d2741bb74"}
Nov 22 10:59:07 crc kubenswrapper[4926]: I1122 10:59:07.723979 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-vwr2z" event={"ID":"89f5e6c8-c807-4ec1-80d1-7b0ac0192d38","Type":"ContainerStarted","Data":"373e24673adffdbfb9d486d2999f332cc654521afbd635d78dae5707dafefcaf"}
Nov 22 10:59:07 crc kubenswrapper[4926]: I1122 10:59:07.727268 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-kwhs5" event={"ID":"85f46bca-0fff-445a-9c52-c0f4478105ff","Type":"ContainerStarted","Data":"cffb02ee83fc4e92af6189933f13586d0707da9320fab4ca476129e6fbc57eee"}
Nov 22 10:59:07 crc kubenswrapper[4926]: I1122 10:59:07.730655 4926 generic.go:334] "Generic (PLEG): container finished" podID="c1058caf-33f4-4f00-bb1f-fe789d442b8d" containerID="777af81e7a14c41a8cb6c96828eba45f9d709c04037bca5f9fe9a335250e7712" exitCode=0
Nov 22 10:59:07 crc kubenswrapper[4926]: I1122 10:59:07.730748 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-865f5d856f-l7kzb" event={"ID":"c1058caf-33f4-4f00-bb1f-fe789d442b8d","Type":"ContainerDied","Data":"777af81e7a14c41a8cb6c96828eba45f9d709c04037bca5f9fe9a335250e7712"}
Nov 22 10:59:07 crc kubenswrapper[4926]: I1122 10:59:07.730780 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-865f5d856f-l7kzb" event={"ID":"c1058caf-33f4-4f00-bb1f-fe789d442b8d","Type":"ContainerStarted","Data":"19e0620b5a4a31d9759740eb452cd7de858a893d544e9f69dcb49c0854fb2239"}
Nov 22 10:59:07 crc kubenswrapper[4926]: I1122 10:59:07.735213 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"8b805a48-aee9-4243-b9eb-32bb0f948260","Type":"ContainerStarted","Data":"7c9d599b6159c7fef83334f91a7ef154173a1f74f865cefeb45d3d857b0ec5b7"}
Nov 22 10:59:07 crc kubenswrapper[4926]: I1122 10:59:07.737120 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"05b9b91d-44f6-47d0-b509-eb056c022a0d","Type":"ContainerStarted","Data":"44f98768d5e72b6253d3ba3602a4db3b40e099f729ba8079707c5e5e2c5f1bac"}
Nov 22 10:59:07 crc kubenswrapper[4926]: I1122 10:59:07.742000 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"e65e0256-cba0-499f-bea4-9f87cac3ffd4","Type":"ContainerStarted","Data":"1971c13ac23cdf85420c376305e5e7065a7c13de3ddc6cf3829b4c79bb6e5a9b"}
Nov 22 10:59:07 crc kubenswrapper[4926]: I1122 10:59:07.749532 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-cell-mapping-kwhs5" podStartSLOduration=2.749509574 podStartE2EDuration="2.749509574s" podCreationTimestamp="2025-11-22 10:59:05 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 10:59:07.744599843 +0000 UTC m=+1168.046205130" watchObservedRunningTime="2025-11-22 10:59:07.749509574 +0000 UTC m=+1168.051114871"
Nov 22 10:59:08 crc kubenswrapper[4926]: I1122 10:59:08.753860 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-vwr2z" event={"ID":"89f5e6c8-c807-4ec1-80d1-7b0ac0192d38","Type":"ContainerStarted","Data":"09d69004abeb128e3b90c88330ccebdea80ff6ca598bdbce8e44986c3614821f"}
Nov 22 10:59:08 crc kubenswrapper[4926]: I1122 10:59:08.768228 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-865f5d856f-l7kzb" event={"ID":"c1058caf-33f4-4f00-bb1f-fe789d442b8d","Type":"ContainerStarted","Data":"6b87219a2657302c8f7393f9f99a82078ce754798e7fafadb126d2f9454f7919"}
Nov 22 10:59:08 crc kubenswrapper[4926]: I1122 10:59:08.768521 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-865f5d856f-l7kzb"
Nov 22 10:59:08 crc kubenswrapper[4926]: I1122 10:59:08.780062 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-conductor-db-sync-vwr2z" podStartSLOduration=2.780040406 podStartE2EDuration="2.780040406s" podCreationTimestamp="2025-11-22 10:59:06 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 10:59:08.771305526 +0000 UTC m=+1169.072910823" watchObservedRunningTime="2025-11-22 10:59:08.780040406 +0000 UTC m=+1169.081645693"
Nov 22 10:59:08 crc kubenswrapper[4926]: I1122 10:59:08.818284 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-865f5d856f-l7kzb" podStartSLOduration=2.818262872 podStartE2EDuration="2.818262872s" podCreationTimestamp="2025-11-22 10:59:06 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 10:59:08.812436255 +0000 UTC m=+1169.114041542" watchObservedRunningTime="2025-11-22 10:59:08.818262872 +0000 UTC m=+1169.119868159"
Nov 22 10:59:09 crc kubenswrapper[4926]: I1122 10:59:09.453539 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-novncproxy-0"]
Nov 22 10:59:09 crc kubenswrapper[4926]: I1122 10:59:09.468636 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"]
Nov 22 10:59:09 crc kubenswrapper[4926]: I1122 10:59:09.661603 4926 patch_prober.go:28] interesting pod/machine-config-daemon-xr9nd container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 22 10:59:09 crc kubenswrapper[4926]: I1122 10:59:09.661678 4926 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" podUID="d4977b14-85c3-4141-9b15-1768f09e8d27" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 22 10:59:11 crc kubenswrapper[4926]: I1122 10:59:11.795548 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"8b805a48-aee9-4243-b9eb-32bb0f948260","Type":"ContainerStarted","Data":"70a66a7b2c251a06fae0ced9b76c216e085252f73e0c0d4b9c871e1f4a74c464"}
Nov 22 10:59:11 crc kubenswrapper[4926]: I1122 10:59:11.796255 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"8b805a48-aee9-4243-b9eb-32bb0f948260","Type":"ContainerStarted","Data":"75ee3b88cf4348a27634025661c549ee36fd6d79d54ecb642a261ceff675b977"}
Nov 22 10:59:11 crc kubenswrapper[4926]: I1122 10:59:11.797716 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"05b9b91d-44f6-47d0-b509-eb056c022a0d","Type":"ContainerStarted","Data":"e5c1b26c0ef07a965fb09ecdb4304e0cdb238a698fd2f302da1175fb2dd5806d"}
Nov 22 10:59:11 crc kubenswrapper[4926]: I1122 10:59:11.801393 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"e65e0256-cba0-499f-bea4-9f87cac3ffd4","Type":"ContainerStarted","Data":"2d669f50d984e0bcf4683e94103b6a65f85d3af7df2fe7fca716e1f4a4d3f83c"}
Nov 22 10:59:11 crc kubenswrapper[4926]: I1122 10:59:11.801450 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"e65e0256-cba0-499f-bea4-9f87cac3ffd4","Type":"ContainerStarted","Data":"b6831530ed3fee2fca029905bb9ab18a8c40adb5504b9674ce53804ed422db5d"}
Nov 22 10:59:11 crc kubenswrapper[4926]: I1122 10:59:11.801621 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="e65e0256-cba0-499f-bea4-9f87cac3ffd4" containerName="nova-metadata-log" containerID="cri-o://b6831530ed3fee2fca029905bb9ab18a8c40adb5504b9674ce53804ed422db5d" gracePeriod=30
Nov 22 10:59:11 crc kubenswrapper[4926]: I1122 10:59:11.801744 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="e65e0256-cba0-499f-bea4-9f87cac3ffd4" containerName="nova-metadata-metadata" containerID="cri-o://2d669f50d984e0bcf4683e94103b6a65f85d3af7df2fe7fca716e1f4a4d3f83c" gracePeriod=30
Nov 22 10:59:11 crc kubenswrapper[4926]: I1122 10:59:11.805724 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"1ad9946b-4e9e-4366-9a71-da0887c1b083","Type":"ContainerStarted","Data":"5bb9326dba542bc13789f7b389ad8e161d6e35a5cc10feedaf8773ecf5063aee"}
Nov 22 10:59:11 crc kubenswrapper[4926]: I1122 10:59:11.806308 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-cell1-novncproxy-0" podUID="1ad9946b-4e9e-4366-9a71-da0887c1b083" containerName="nova-cell1-novncproxy-novncproxy" containerID="cri-o://5bb9326dba542bc13789f7b389ad8e161d6e35a5cc10feedaf8773ecf5063aee" gracePeriod=30
Nov 22 10:59:11 crc kubenswrapper[4926]: I1122 10:59:11.827660 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=3.099682513 podStartE2EDuration="6.827639815s" podCreationTimestamp="2025-11-22 10:59:05 +0000 UTC" firstStartedPulling="2025-11-22 10:59:06.848257847 +0000 UTC m=+1167.149863144" lastFinishedPulling="2025-11-22 10:59:10.576215159 +0000 UTC m=+1170.877820446" observedRunningTime="2025-11-22 10:59:11.819248274 +0000 UTC m=+1172.120853571" watchObservedRunningTime="2025-11-22 10:59:11.827639815 +0000 UTC m=+1172.129245102"
Nov 22 10:59:11 crc kubenswrapper[4926]: I1122 10:59:11.840433 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-scheduler-0" podStartSLOduration=2.25131341 podStartE2EDuration="5.840415031s" podCreationTimestamp="2025-11-22 10:59:06 +0000 UTC" firstStartedPulling="2025-11-22 10:59:06.987689974 +0000 UTC m=+1167.289295251" lastFinishedPulling="2025-11-22 10:59:10.576791585 +0000 UTC m=+1170.878396872" observedRunningTime="2025-11-22 10:59:11.838382292 +0000 UTC m=+1172.139987589" watchObservedRunningTime="2025-11-22 10:59:11.840415031 +0000 UTC m=+1172.142020318"
Nov 22 10:59:11 crc kubenswrapper[4926]: I1122 10:59:11.887502 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=2.265420424 podStartE2EDuration="5.887460949s" podCreationTimestamp="2025-11-22 10:59:06 +0000 UTC" firstStartedPulling="2025-11-22 10:59:06.992139482 +0000 UTC m=+1167.293744759" lastFinishedPulling="2025-11-22 10:59:10.614179987 +0000 UTC m=+1170.915785284" observedRunningTime="2025-11-22 10:59:11.884053742 +0000 UTC m=+1172.185659039" watchObservedRunningTime="2025-11-22 10:59:11.887460949 +0000 UTC m=+1172.189066236"
Nov 22 10:59:11 crc kubenswrapper[4926]: I1122 10:59:11.896212 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-novncproxy-0" podStartSLOduration=3.271937391 podStartE2EDuration="6.89619269s" podCreationTimestamp="2025-11-22 10:59:05 +0000 UTC" firstStartedPulling="2025-11-22 10:59:06.951436105 +0000 UTC m=+1167.253041392" lastFinishedPulling="2025-11-22 10:59:10.575691404 +0000 UTC m=+1170.877296691" observedRunningTime="2025-11-22 10:59:11.861495185 +0000 UTC m=+1172.163100502" watchObservedRunningTime="2025-11-22 10:59:11.89619269 +0000 UTC m=+1172.197797977"
Nov 22 10:59:12 crc kubenswrapper[4926]: I1122 10:59:12.381528 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0"
Nov 22 10:59:12 crc kubenswrapper[4926]: I1122 10:59:12.537267 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e65e0256-cba0-499f-bea4-9f87cac3ffd4-combined-ca-bundle\") pod \"e65e0256-cba0-499f-bea4-9f87cac3ffd4\" (UID: \"e65e0256-cba0-499f-bea4-9f87cac3ffd4\") "
Nov 22 10:59:12 crc kubenswrapper[4926]: I1122 10:59:12.537307 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pstgt\" (UniqueName: \"kubernetes.io/projected/e65e0256-cba0-499f-bea4-9f87cac3ffd4-kube-api-access-pstgt\") pod \"e65e0256-cba0-499f-bea4-9f87cac3ffd4\" (UID: \"e65e0256-cba0-499f-bea4-9f87cac3ffd4\") "
Nov 22 10:59:12 crc kubenswrapper[4926]: I1122 10:59:12.537340 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e65e0256-cba0-499f-bea4-9f87cac3ffd4-logs\") pod \"e65e0256-cba0-499f-bea4-9f87cac3ffd4\" (UID: \"e65e0256-cba0-499f-bea4-9f87cac3ffd4\") "
Nov 22 10:59:12 crc kubenswrapper[4926]: I1122 10:59:12.537869 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e65e0256-cba0-499f-bea4-9f87cac3ffd4-logs" (OuterVolumeSpecName: "logs") pod "e65e0256-cba0-499f-bea4-9f87cac3ffd4" (UID: "e65e0256-cba0-499f-bea4-9f87cac3ffd4"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 22 10:59:12 crc kubenswrapper[4926]: I1122 10:59:12.537997 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e65e0256-cba0-499f-bea4-9f87cac3ffd4-config-data\") pod \"e65e0256-cba0-499f-bea4-9f87cac3ffd4\" (UID: \"e65e0256-cba0-499f-bea4-9f87cac3ffd4\") "
Nov 22 10:59:12 crc kubenswrapper[4926]: I1122 10:59:12.539254 4926 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e65e0256-cba0-499f-bea4-9f87cac3ffd4-logs\") on node \"crc\" DevicePath \"\""
Nov 22 10:59:12 crc kubenswrapper[4926]: I1122 10:59:12.545316 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e65e0256-cba0-499f-bea4-9f87cac3ffd4-kube-api-access-pstgt" (OuterVolumeSpecName: "kube-api-access-pstgt") pod "e65e0256-cba0-499f-bea4-9f87cac3ffd4" (UID: "e65e0256-cba0-499f-bea4-9f87cac3ffd4"). InnerVolumeSpecName "kube-api-access-pstgt". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 22 10:59:12 crc kubenswrapper[4926]: I1122 10:59:12.569881 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e65e0256-cba0-499f-bea4-9f87cac3ffd4-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "e65e0256-cba0-499f-bea4-9f87cac3ffd4" (UID: "e65e0256-cba0-499f-bea4-9f87cac3ffd4"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 22 10:59:12 crc kubenswrapper[4926]: I1122 10:59:12.577097 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e65e0256-cba0-499f-bea4-9f87cac3ffd4-config-data" (OuterVolumeSpecName: "config-data") pod "e65e0256-cba0-499f-bea4-9f87cac3ffd4" (UID: "e65e0256-cba0-499f-bea4-9f87cac3ffd4"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 22 10:59:12 crc kubenswrapper[4926]: I1122 10:59:12.640833 4926 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e65e0256-cba0-499f-bea4-9f87cac3ffd4-config-data\") on node \"crc\" DevicePath \"\""
Nov 22 10:59:12 crc kubenswrapper[4926]: I1122 10:59:12.640867 4926 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e65e0256-cba0-499f-bea4-9f87cac3ffd4-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 22 10:59:12 crc kubenswrapper[4926]: I1122 10:59:12.640877 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pstgt\" (UniqueName: \"kubernetes.io/projected/e65e0256-cba0-499f-bea4-9f87cac3ffd4-kube-api-access-pstgt\") on node \"crc\" DevicePath \"\""
Nov 22 10:59:12 crc kubenswrapper[4926]: I1122 10:59:12.816824 4926 generic.go:334] "Generic (PLEG): container finished" podID="e65e0256-cba0-499f-bea4-9f87cac3ffd4" containerID="2d669f50d984e0bcf4683e94103b6a65f85d3af7df2fe7fca716e1f4a4d3f83c" exitCode=0
Nov 22 10:59:12 crc kubenswrapper[4926]: I1122 10:59:12.816856 4926 generic.go:334] "Generic (PLEG): container finished" podID="e65e0256-cba0-499f-bea4-9f87cac3ffd4" containerID="b6831530ed3fee2fca029905bb9ab18a8c40adb5504b9674ce53804ed422db5d" exitCode=143
Nov 22 10:59:12 crc kubenswrapper[4926]: I1122 10:59:12.816905 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0"
Nov 22 10:59:12 crc kubenswrapper[4926]: I1122 10:59:12.816913 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"e65e0256-cba0-499f-bea4-9f87cac3ffd4","Type":"ContainerDied","Data":"2d669f50d984e0bcf4683e94103b6a65f85d3af7df2fe7fca716e1f4a4d3f83c"}
Nov 22 10:59:12 crc kubenswrapper[4926]: I1122 10:59:12.816994 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"e65e0256-cba0-499f-bea4-9f87cac3ffd4","Type":"ContainerDied","Data":"b6831530ed3fee2fca029905bb9ab18a8c40adb5504b9674ce53804ed422db5d"}
Nov 22 10:59:12 crc kubenswrapper[4926]: I1122 10:59:12.817018 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"e65e0256-cba0-499f-bea4-9f87cac3ffd4","Type":"ContainerDied","Data":"1971c13ac23cdf85420c376305e5e7065a7c13de3ddc6cf3829b4c79bb6e5a9b"}
Nov 22 10:59:12 crc kubenswrapper[4926]: I1122 10:59:12.817042 4926 scope.go:117] "RemoveContainer" containerID="2d669f50d984e0bcf4683e94103b6a65f85d3af7df2fe7fca716e1f4a4d3f83c"
Nov 22 10:59:12 crc kubenswrapper[4926]: I1122 10:59:12.857327 4926 scope.go:117] "RemoveContainer" containerID="b6831530ed3fee2fca029905bb9ab18a8c40adb5504b9674ce53804ed422db5d"
Nov 22 10:59:12 crc kubenswrapper[4926]: I1122 10:59:12.864460 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"]
Nov 22 10:59:12 crc kubenswrapper[4926]: I1122 10:59:12.874746 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-metadata-0"]
Nov 22 10:59:12 crc kubenswrapper[4926]: I1122 10:59:12.897945 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"]
Nov 22 10:59:12 crc kubenswrapper[4926]: E1122 10:59:12.898695 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e65e0256-cba0-499f-bea4-9f87cac3ffd4" containerName="nova-metadata-metadata"
Nov 22 10:59:12 crc kubenswrapper[4926]: I1122 10:59:12.898717 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="e65e0256-cba0-499f-bea4-9f87cac3ffd4" containerName="nova-metadata-metadata"
Nov 22 10:59:12 crc kubenswrapper[4926]: E1122 10:59:12.898773 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e65e0256-cba0-499f-bea4-9f87cac3ffd4" containerName="nova-metadata-log"
Nov 22 10:59:12 crc kubenswrapper[4926]: I1122 10:59:12.898782 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="e65e0256-cba0-499f-bea4-9f87cac3ffd4" containerName="nova-metadata-log"
Nov 22 10:59:12 crc kubenswrapper[4926]: I1122 10:59:12.899044 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="e65e0256-cba0-499f-bea4-9f87cac3ffd4" containerName="nova-metadata-metadata"
Nov 22 10:59:12 crc kubenswrapper[4926]: I1122 10:59:12.899073 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="e65e0256-cba0-499f-bea4-9f87cac3ffd4" containerName="nova-metadata-log"
Nov 22 10:59:12 crc kubenswrapper[4926]: I1122 10:59:12.900310 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0"
Nov 22 10:59:12 crc kubenswrapper[4926]: I1122 10:59:12.903493 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data"
Nov 22 10:59:12 crc kubenswrapper[4926]: I1122 10:59:12.903568 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-metadata-internal-svc"
Nov 22 10:59:12 crc kubenswrapper[4926]: I1122 10:59:12.906124 4926 scope.go:117] "RemoveContainer" containerID="2d669f50d984e0bcf4683e94103b6a65f85d3af7df2fe7fca716e1f4a4d3f83c"
Nov 22 10:59:12 crc kubenswrapper[4926]: E1122 10:59:12.910008 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2d669f50d984e0bcf4683e94103b6a65f85d3af7df2fe7fca716e1f4a4d3f83c\": container with ID starting with 2d669f50d984e0bcf4683e94103b6a65f85d3af7df2fe7fca716e1f4a4d3f83c not found: ID does not exist" containerID="2d669f50d984e0bcf4683e94103b6a65f85d3af7df2fe7fca716e1f4a4d3f83c"
Nov 22 10:59:12 crc kubenswrapper[4926]: I1122 10:59:12.910042 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2d669f50d984e0bcf4683e94103b6a65f85d3af7df2fe7fca716e1f4a4d3f83c"} err="failed to get container status \"2d669f50d984e0bcf4683e94103b6a65f85d3af7df2fe7fca716e1f4a4d3f83c\": rpc error: code = NotFound desc = could not find container \"2d669f50d984e0bcf4683e94103b6a65f85d3af7df2fe7fca716e1f4a4d3f83c\": container with ID starting with 2d669f50d984e0bcf4683e94103b6a65f85d3af7df2fe7fca716e1f4a4d3f83c not found: ID does not exist"
Nov 22 10:59:12 crc kubenswrapper[4926]: I1122 10:59:12.910066 4926 scope.go:117] "RemoveContainer" containerID="b6831530ed3fee2fca029905bb9ab18a8c40adb5504b9674ce53804ed422db5d"
Nov 22 10:59:12 crc kubenswrapper[4926]: E1122 10:59:12.913351 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b6831530ed3fee2fca029905bb9ab18a8c40adb5504b9674ce53804ed422db5d\": container with ID starting with b6831530ed3fee2fca029905bb9ab18a8c40adb5504b9674ce53804ed422db5d not found: ID does not exist" containerID="b6831530ed3fee2fca029905bb9ab18a8c40adb5504b9674ce53804ed422db5d"
Nov 22 10:59:12 crc kubenswrapper[4926]: I1122 10:59:12.913398 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b6831530ed3fee2fca029905bb9ab18a8c40adb5504b9674ce53804ed422db5d"} err="failed to get container status \"b6831530ed3fee2fca029905bb9ab18a8c40adb5504b9674ce53804ed422db5d\": rpc error: code = NotFound desc = could not find container \"b6831530ed3fee2fca029905bb9ab18a8c40adb5504b9674ce53804ed422db5d\": container with ID starting with b6831530ed3fee2fca029905bb9ab18a8c40adb5504b9674ce53804ed422db5d not found: ID does not exist"
Nov 22 10:59:12 crc kubenswrapper[4926]: I1122 10:59:12.913429 4926 scope.go:117] "RemoveContainer" containerID="2d669f50d984e0bcf4683e94103b6a65f85d3af7df2fe7fca716e1f4a4d3f83c"
Nov 22 10:59:12 crc kubenswrapper[4926]: I1122 10:59:12.926687 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2d669f50d984e0bcf4683e94103b6a65f85d3af7df2fe7fca716e1f4a4d3f83c"} err="failed to get container status \"2d669f50d984e0bcf4683e94103b6a65f85d3af7df2fe7fca716e1f4a4d3f83c\": rpc error: code = NotFound desc = could not find container \"2d669f50d984e0bcf4683e94103b6a65f85d3af7df2fe7fca716e1f4a4d3f83c\": container with ID starting with 2d669f50d984e0bcf4683e94103b6a65f85d3af7df2fe7fca716e1f4a4d3f83c not found: ID does not exist"
Nov 22 10:59:12 crc kubenswrapper[4926]: I1122 10:59:12.926738 4926 scope.go:117] "RemoveContainer" containerID="b6831530ed3fee2fca029905bb9ab18a8c40adb5504b9674ce53804ed422db5d"
Nov 22 10:59:12 crc kubenswrapper[4926]: I1122 10:59:12.927342 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b6831530ed3fee2fca029905bb9ab18a8c40adb5504b9674ce53804ed422db5d"} err="failed to get container status \"b6831530ed3fee2fca029905bb9ab18a8c40adb5504b9674ce53804ed422db5d\": rpc error: code = NotFound desc = could not find container \"b6831530ed3fee2fca029905bb9ab18a8c40adb5504b9674ce53804ed422db5d\": container with ID starting with b6831530ed3fee2fca029905bb9ab18a8c40adb5504b9674ce53804ed422db5d not found: ID does not exist"
Nov 22 10:59:12 crc kubenswrapper[4926]: I1122 10:59:12.938881 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"]
Nov 22 10:59:13 crc kubenswrapper[4926]: I1122 10:59:13.049236 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f5ede692-d924-48be-a7c5-b80104345b9c-config-data\") pod \"nova-metadata-0\" (UID: \"f5ede692-d924-48be-a7c5-b80104345b9c\") " pod="openstack/nova-metadata-0"
Nov 22 10:59:13 crc kubenswrapper[4926]: I1122 10:59:13.049396 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f5ede692-d924-48be-a7c5-b80104345b9c-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"f5ede692-d924-48be-a7c5-b80104345b9c\") " pod="openstack/nova-metadata-0"
Nov 22 10:59:13 crc kubenswrapper[4926]: I1122 10:59:13.049440 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wq76q\" (UniqueName: \"kubernetes.io/projected/f5ede692-d924-48be-a7c5-b80104345b9c-kube-api-access-wq76q\") pod \"nova-metadata-0\" (UID: \"f5ede692-d924-48be-a7c5-b80104345b9c\") " pod="openstack/nova-metadata-0"
Nov 22 10:59:13 crc kubenswrapper[4926]: I1122 10:59:13.049483 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName:
\"kubernetes.io/empty-dir/f5ede692-d924-48be-a7c5-b80104345b9c-logs\") pod \"nova-metadata-0\" (UID: \"f5ede692-d924-48be-a7c5-b80104345b9c\") " pod="openstack/nova-metadata-0" Nov 22 10:59:13 crc kubenswrapper[4926]: I1122 10:59:13.049618 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/f5ede692-d924-48be-a7c5-b80104345b9c-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"f5ede692-d924-48be-a7c5-b80104345b9c\") " pod="openstack/nova-metadata-0" Nov 22 10:59:13 crc kubenswrapper[4926]: I1122 10:59:13.150658 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f5ede692-d924-48be-a7c5-b80104345b9c-config-data\") pod \"nova-metadata-0\" (UID: \"f5ede692-d924-48be-a7c5-b80104345b9c\") " pod="openstack/nova-metadata-0" Nov 22 10:59:13 crc kubenswrapper[4926]: I1122 10:59:13.150762 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wq76q\" (UniqueName: \"kubernetes.io/projected/f5ede692-d924-48be-a7c5-b80104345b9c-kube-api-access-wq76q\") pod \"nova-metadata-0\" (UID: \"f5ede692-d924-48be-a7c5-b80104345b9c\") " pod="openstack/nova-metadata-0" Nov 22 10:59:13 crc kubenswrapper[4926]: I1122 10:59:13.150793 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f5ede692-d924-48be-a7c5-b80104345b9c-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"f5ede692-d924-48be-a7c5-b80104345b9c\") " pod="openstack/nova-metadata-0" Nov 22 10:59:13 crc kubenswrapper[4926]: I1122 10:59:13.150827 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f5ede692-d924-48be-a7c5-b80104345b9c-logs\") pod \"nova-metadata-0\" (UID: \"f5ede692-d924-48be-a7c5-b80104345b9c\") " pod="openstack/nova-metadata-0" Nov 22 10:59:13 crc kubenswrapper[4926]: I1122 10:59:13.151001 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/f5ede692-d924-48be-a7c5-b80104345b9c-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"f5ede692-d924-48be-a7c5-b80104345b9c\") " pod="openstack/nova-metadata-0" Nov 22 10:59:13 crc kubenswrapper[4926]: I1122 10:59:13.152074 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f5ede692-d924-48be-a7c5-b80104345b9c-logs\") pod \"nova-metadata-0\" (UID: \"f5ede692-d924-48be-a7c5-b80104345b9c\") " pod="openstack/nova-metadata-0" Nov 22 10:59:13 crc kubenswrapper[4926]: I1122 10:59:13.156770 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/f5ede692-d924-48be-a7c5-b80104345b9c-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"f5ede692-d924-48be-a7c5-b80104345b9c\") " pod="openstack/nova-metadata-0" Nov 22 10:59:13 crc kubenswrapper[4926]: I1122 10:59:13.162355 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f5ede692-d924-48be-a7c5-b80104345b9c-config-data\") pod \"nova-metadata-0\" (UID: \"f5ede692-d924-48be-a7c5-b80104345b9c\") " pod="openstack/nova-metadata-0" Nov 22 10:59:13 crc kubenswrapper[4926]: I1122 10:59:13.167218 4926 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f5ede692-d924-48be-a7c5-b80104345b9c-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"f5ede692-d924-48be-a7c5-b80104345b9c\") " pod="openstack/nova-metadata-0" Nov 22 10:59:13 crc kubenswrapper[4926]: I1122 10:59:13.175423 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wq76q\" (UniqueName: \"kubernetes.io/projected/f5ede692-d924-48be-a7c5-b80104345b9c-kube-api-access-wq76q\") pod \"nova-metadata-0\" (UID: \"f5ede692-d924-48be-a7c5-b80104345b9c\") " pod="openstack/nova-metadata-0" Nov 22 10:59:13 crc kubenswrapper[4926]: I1122 10:59:13.237707 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 22 10:59:13 crc kubenswrapper[4926]: I1122 10:59:13.739617 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 22 10:59:13 crc kubenswrapper[4926]: W1122 10:59:13.743569 4926 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf5ede692_d924_48be_a7c5_b80104345b9c.slice/crio-c3c4d70f91b4222abac22567ea6629e5536975efac496e78ab96d05a83d88c0f WatchSource:0}: Error finding container c3c4d70f91b4222abac22567ea6629e5536975efac496e78ab96d05a83d88c0f: Status 404 returned error can't find the container with id c3c4d70f91b4222abac22567ea6629e5536975efac496e78ab96d05a83d88c0f Nov 22 10:59:13 crc kubenswrapper[4926]: I1122 10:59:13.830052 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"f5ede692-d924-48be-a7c5-b80104345b9c","Type":"ContainerStarted","Data":"c3c4d70f91b4222abac22567ea6629e5536975efac496e78ab96d05a83d88c0f"} Nov 22 10:59:14 crc kubenswrapper[4926]: I1122 10:59:14.594607 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e65e0256-cba0-499f-bea4-9f87cac3ffd4" path="/var/lib/kubelet/pods/e65e0256-cba0-499f-bea4-9f87cac3ffd4/volumes" Nov 22 10:59:14 crc kubenswrapper[4926]: I1122 10:59:14.841998 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"f5ede692-d924-48be-a7c5-b80104345b9c","Type":"ContainerStarted","Data":"da30d6c4e08a9b8cabde8f5f1bbb858dd5af689020b473d823ae4c6bae8bef09"} Nov 22 10:59:14 crc kubenswrapper[4926]: I1122 10:59:14.842060 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"f5ede692-d924-48be-a7c5-b80104345b9c","Type":"ContainerStarted","Data":"ac75a2dbee16f6ef89ef43b57a95e669b9ad6506e66da77565f22a8909edd9e0"} Nov 22 10:59:14 crc kubenswrapper[4926]: I1122 10:59:14.844395 4926 generic.go:334] "Generic (PLEG): container finished" podID="85f46bca-0fff-445a-9c52-c0f4478105ff" containerID="cffb02ee83fc4e92af6189933f13586d0707da9320fab4ca476129e6fbc57eee" exitCode=0 Nov 22 10:59:14 crc kubenswrapper[4926]: I1122 10:59:14.844446 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-kwhs5" event={"ID":"85f46bca-0fff-445a-9c52-c0f4478105ff","Type":"ContainerDied","Data":"cffb02ee83fc4e92af6189933f13586d0707da9320fab4ca476129e6fbc57eee"} Nov 22 10:59:14 crc kubenswrapper[4926]: I1122 10:59:14.864932 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=2.864917726 podStartE2EDuration="2.864917726s" podCreationTimestamp="2025-11-22 10:59:12 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 
+0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 10:59:14.861620541 +0000 UTC m=+1175.163225838" watchObservedRunningTime="2025-11-22 10:59:14.864917726 +0000 UTC m=+1175.166523013" Nov 22 10:59:15 crc kubenswrapper[4926]: I1122 10:59:15.855372 4926 generic.go:334] "Generic (PLEG): container finished" podID="89f5e6c8-c807-4ec1-80d1-7b0ac0192d38" containerID="09d69004abeb128e3b90c88330ccebdea80ff6ca598bdbce8e44986c3614821f" exitCode=0 Nov 22 10:59:15 crc kubenswrapper[4926]: I1122 10:59:15.855470 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-vwr2z" event={"ID":"89f5e6c8-c807-4ec1-80d1-7b0ac0192d38","Type":"ContainerDied","Data":"09d69004abeb128e3b90c88330ccebdea80ff6ca598bdbce8e44986c3614821f"} Nov 22 10:59:16 crc kubenswrapper[4926]: I1122 10:59:16.180182 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Nov 22 10:59:16 crc kubenswrapper[4926]: I1122 10:59:16.180562 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Nov 22 10:59:16 crc kubenswrapper[4926]: I1122 10:59:16.194943 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-cell-mapping-kwhs5" Nov 22 10:59:16 crc kubenswrapper[4926]: I1122 10:59:16.212675 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/85f46bca-0fff-445a-9c52-c0f4478105ff-combined-ca-bundle\") pod \"85f46bca-0fff-445a-9c52-c0f4478105ff\" (UID: \"85f46bca-0fff-445a-9c52-c0f4478105ff\") " Nov 22 10:59:16 crc kubenswrapper[4926]: I1122 10:59:16.212822 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fxhz7\" (UniqueName: \"kubernetes.io/projected/85f46bca-0fff-445a-9c52-c0f4478105ff-kube-api-access-fxhz7\") pod \"85f46bca-0fff-445a-9c52-c0f4478105ff\" (UID: \"85f46bca-0fff-445a-9c52-c0f4478105ff\") " Nov 22 10:59:16 crc kubenswrapper[4926]: I1122 10:59:16.212956 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/85f46bca-0fff-445a-9c52-c0f4478105ff-config-data\") pod \"85f46bca-0fff-445a-9c52-c0f4478105ff\" (UID: \"85f46bca-0fff-445a-9c52-c0f4478105ff\") " Nov 22 10:59:16 crc kubenswrapper[4926]: I1122 10:59:16.212999 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/85f46bca-0fff-445a-9c52-c0f4478105ff-scripts\") pod \"85f46bca-0fff-445a-9c52-c0f4478105ff\" (UID: \"85f46bca-0fff-445a-9c52-c0f4478105ff\") " Nov 22 10:59:16 crc kubenswrapper[4926]: I1122 10:59:16.218584 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/85f46bca-0fff-445a-9c52-c0f4478105ff-scripts" (OuterVolumeSpecName: "scripts") pod "85f46bca-0fff-445a-9c52-c0f4478105ff" (UID: "85f46bca-0fff-445a-9c52-c0f4478105ff"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:59:16 crc kubenswrapper[4926]: I1122 10:59:16.224264 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-novncproxy-0" Nov 22 10:59:16 crc kubenswrapper[4926]: I1122 10:59:16.232624 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/85f46bca-0fff-445a-9c52-c0f4478105ff-kube-api-access-fxhz7" (OuterVolumeSpecName: "kube-api-access-fxhz7") pod "85f46bca-0fff-445a-9c52-c0f4478105ff" (UID: "85f46bca-0fff-445a-9c52-c0f4478105ff"). InnerVolumeSpecName "kube-api-access-fxhz7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:59:16 crc kubenswrapper[4926]: I1122 10:59:16.254026 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/85f46bca-0fff-445a-9c52-c0f4478105ff-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "85f46bca-0fff-445a-9c52-c0f4478105ff" (UID: "85f46bca-0fff-445a-9c52-c0f4478105ff"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:59:16 crc kubenswrapper[4926]: I1122 10:59:16.258051 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/85f46bca-0fff-445a-9c52-c0f4478105ff-config-data" (OuterVolumeSpecName: "config-data") pod "85f46bca-0fff-445a-9c52-c0f4478105ff" (UID: "85f46bca-0fff-445a-9c52-c0f4478105ff"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:59:16 crc kubenswrapper[4926]: I1122 10:59:16.314790 4926 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/85f46bca-0fff-445a-9c52-c0f4478105ff-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 22 10:59:16 crc kubenswrapper[4926]: I1122 10:59:16.315029 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fxhz7\" (UniqueName: \"kubernetes.io/projected/85f46bca-0fff-445a-9c52-c0f4478105ff-kube-api-access-fxhz7\") on node \"crc\" DevicePath \"\"" Nov 22 10:59:16 crc kubenswrapper[4926]: I1122 10:59:16.315102 4926 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/85f46bca-0fff-445a-9c52-c0f4478105ff-config-data\") on node \"crc\" DevicePath \"\"" Nov 22 10:59:16 crc kubenswrapper[4926]: I1122 10:59:16.315157 4926 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/85f46bca-0fff-445a-9c52-c0f4478105ff-scripts\") on node \"crc\" DevicePath \"\"" Nov 22 10:59:16 crc kubenswrapper[4926]: I1122 10:59:16.319566 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0" Nov 22 10:59:16 crc kubenswrapper[4926]: I1122 10:59:16.319959 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0" Nov 22 10:59:16 crc kubenswrapper[4926]: I1122 10:59:16.366241 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-scheduler-0" Nov 22 10:59:16 crc kubenswrapper[4926]: I1122 10:59:16.642146 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-865f5d856f-l7kzb" Nov 22 10:59:16 crc kubenswrapper[4926]: I1122 10:59:16.713042 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6bb4fc677f-h86xn"] Nov 22 10:59:16 crc kubenswrapper[4926]: I1122 10:59:16.713558 4926 
Nov 22 10:59:16 crc kubenswrapper[4926]: I1122 10:59:16.781100 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-6bb4fc677f-h86xn" podUID="0667807b-dd56-4e0b-b2b7-9936b24ad975" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.165:5353: connect: connection refused"
Nov 22 10:59:16 crc kubenswrapper[4926]: I1122 10:59:16.869202 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-kwhs5" event={"ID":"85f46bca-0fff-445a-9c52-c0f4478105ff","Type":"ContainerDied","Data":"6fb63d1430992c076e783b0037cd367f2548ec1af63a474337ae7bdbb9c06107"}
Nov 22 10:59:16 crc kubenswrapper[4926]: I1122 10:59:16.869237 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-cell-mapping-kwhs5"
Nov 22 10:59:16 crc kubenswrapper[4926]: I1122 10:59:16.869245 4926 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="6fb63d1430992c076e783b0037cd367f2548ec1af63a474337ae7bdbb9c06107"
Nov 22 10:59:16 crc kubenswrapper[4926]: I1122 10:59:16.882152 4926 generic.go:334] "Generic (PLEG): container finished" podID="0667807b-dd56-4e0b-b2b7-9936b24ad975" containerID="156380288bb350c0ba1cffa92a44e6229c6cba775887be81ba0dc3cc886efefa" exitCode=0
Nov 22 10:59:16 crc kubenswrapper[4926]: I1122 10:59:16.882964 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6bb4fc677f-h86xn" event={"ID":"0667807b-dd56-4e0b-b2b7-9936b24ad975","Type":"ContainerDied","Data":"156380288bb350c0ba1cffa92a44e6229c6cba775887be81ba0dc3cc886efefa"}
Nov 22 10:59:16 crc kubenswrapper[4926]: I1122 10:59:16.936142 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-scheduler-0"
Nov 22 10:59:17 crc kubenswrapper[4926]: I1122 10:59:17.034738 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"]
Nov 22 10:59:17 crc kubenswrapper[4926]: I1122 10:59:17.034974 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="8b805a48-aee9-4243-b9eb-32bb0f948260" containerName="nova-api-log" containerID="cri-o://75ee3b88cf4348a27634025661c549ee36fd6d79d54ecb642a261ceff675b977" gracePeriod=30
Nov 22 10:59:17 crc kubenswrapper[4926]: I1122 10:59:17.035156 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="8b805a48-aee9-4243-b9eb-32bb0f948260" containerName="nova-api-api" containerID="cri-o://70a66a7b2c251a06fae0ced9b76c216e085252f73e0c0d4b9c871e1f4a74c464" gracePeriod=30
Nov 22 10:59:17 crc kubenswrapper[4926]: I1122 10:59:17.042157 4926 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="8b805a48-aee9-4243-b9eb-32bb0f948260" containerName="nova-api-log" probeResult="failure" output="Get \"http://10.217.0.187:8774/\": EOF"
Nov 22 10:59:17 crc kubenswrapper[4926]: I1122 10:59:17.042155 4926 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="8b805a48-aee9-4243-b9eb-32bb0f948260" containerName="nova-api-api" probeResult="failure" output="Get \"http://10.217.0.187:8774/\": EOF"
Nov 22 10:59:17 crc kubenswrapper[4926]: I1122 10:59:17.067814 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"]
Nov 22 10:59:17 crc kubenswrapper[4926]: I1122 10:59:17.068086 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="f5ede692-d924-48be-a7c5-b80104345b9c" containerName="nova-metadata-log" containerID="cri-o://ac75a2dbee16f6ef89ef43b57a95e669b9ad6506e66da77565f22a8909edd9e0" gracePeriod=30
Nov 22 10:59:17 crc kubenswrapper[4926]: I1122 10:59:17.068292 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="f5ede692-d924-48be-a7c5-b80104345b9c" containerName="nova-metadata-metadata" containerID="cri-o://da30d6c4e08a9b8cabde8f5f1bbb858dd5af689020b473d823ae4c6bae8bef09" gracePeriod=30
Nov 22 10:59:17 crc kubenswrapper[4926]: I1122 10:59:17.455239 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6bb4fc677f-h86xn"
Nov 22 10:59:17 crc kubenswrapper[4926]: I1122 10:59:17.462636 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-vwr2z"
Nov 22 10:59:17 crc kubenswrapper[4926]: I1122 10:59:17.530416 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"]
Nov 22 10:59:17 crc kubenswrapper[4926]: I1122 10:59:17.537318 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/0667807b-dd56-4e0b-b2b7-9936b24ad975-ovsdbserver-sb\") pod \"0667807b-dd56-4e0b-b2b7-9936b24ad975\" (UID: \"0667807b-dd56-4e0b-b2b7-9936b24ad975\") "
Nov 22 10:59:17 crc kubenswrapper[4926]: I1122 10:59:17.537376 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/0667807b-dd56-4e0b-b2b7-9936b24ad975-dns-svc\") pod \"0667807b-dd56-4e0b-b2b7-9936b24ad975\" (UID: \"0667807b-dd56-4e0b-b2b7-9936b24ad975\") "
Nov 22 10:59:17 crc kubenswrapper[4926]: I1122 10:59:17.537401 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0667807b-dd56-4e0b-b2b7-9936b24ad975-config\") pod \"0667807b-dd56-4e0b-b2b7-9936b24ad975\" (UID: \"0667807b-dd56-4e0b-b2b7-9936b24ad975\") "
Nov 22 10:59:17 crc kubenswrapper[4926]: I1122 10:59:17.537459 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/89f5e6c8-c807-4ec1-80d1-7b0ac0192d38-scripts\") pod \"89f5e6c8-c807-4ec1-80d1-7b0ac0192d38\" (UID: \"89f5e6c8-c807-4ec1-80d1-7b0ac0192d38\") "
Nov 22 10:59:17 crc kubenswrapper[4926]: I1122 10:59:17.537518 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/0667807b-dd56-4e0b-b2b7-9936b24ad975-ovsdbserver-nb\") pod \"0667807b-dd56-4e0b-b2b7-9936b24ad975\" (UID: \"0667807b-dd56-4e0b-b2b7-9936b24ad975\") "
Nov 22 10:59:17 crc kubenswrapper[4926]: I1122 10:59:17.537574 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/89f5e6c8-c807-4ec1-80d1-7b0ac0192d38-config-data\") pod \"89f5e6c8-c807-4ec1-80d1-7b0ac0192d38\" (UID: \"89f5e6c8-c807-4ec1-80d1-7b0ac0192d38\") "
Nov 22 10:59:17 crc kubenswrapper[4926]: I1122 10:59:17.537616 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/0667807b-dd56-4e0b-b2b7-9936b24ad975-dns-swift-storage-0\") pod \"0667807b-dd56-4e0b-b2b7-9936b24ad975\" (UID: \"0667807b-dd56-4e0b-b2b7-9936b24ad975\") "
Nov 22 10:59:17 crc kubenswrapper[4926]: I1122 10:59:17.537640 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/89f5e6c8-c807-4ec1-80d1-7b0ac0192d38-combined-ca-bundle\") pod \"89f5e6c8-c807-4ec1-80d1-7b0ac0192d38\" (UID: \"89f5e6c8-c807-4ec1-80d1-7b0ac0192d38\") "
Nov 22 10:59:17 crc kubenswrapper[4926]: I1122 10:59:17.537735 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-z6plr\" (UniqueName: \"kubernetes.io/projected/0667807b-dd56-4e0b-b2b7-9936b24ad975-kube-api-access-z6plr\") pod \"0667807b-dd56-4e0b-b2b7-9936b24ad975\" (UID: \"0667807b-dd56-4e0b-b2b7-9936b24ad975\") "
Nov 22 10:59:17 crc kubenswrapper[4926]: I1122 10:59:17.537788 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-q7gqc\" (UniqueName: \"kubernetes.io/projected/89f5e6c8-c807-4ec1-80d1-7b0ac0192d38-kube-api-access-q7gqc\") pod \"89f5e6c8-c807-4ec1-80d1-7b0ac0192d38\" (UID: \"89f5e6c8-c807-4ec1-80d1-7b0ac0192d38\") "
Nov 22 10:59:17 crc kubenswrapper[4926]: I1122 10:59:17.554938 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0667807b-dd56-4e0b-b2b7-9936b24ad975-kube-api-access-z6plr" (OuterVolumeSpecName: "kube-api-access-z6plr") pod "0667807b-dd56-4e0b-b2b7-9936b24ad975" (UID: "0667807b-dd56-4e0b-b2b7-9936b24ad975"). InnerVolumeSpecName "kube-api-access-z6plr". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 22 10:59:17 crc kubenswrapper[4926]: I1122 10:59:17.555030 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/89f5e6c8-c807-4ec1-80d1-7b0ac0192d38-kube-api-access-q7gqc" (OuterVolumeSpecName: "kube-api-access-q7gqc") pod "89f5e6c8-c807-4ec1-80d1-7b0ac0192d38" (UID: "89f5e6c8-c807-4ec1-80d1-7b0ac0192d38"). InnerVolumeSpecName "kube-api-access-q7gqc". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 22 10:59:17 crc kubenswrapper[4926]: I1122 10:59:17.579543 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/89f5e6c8-c807-4ec1-80d1-7b0ac0192d38-scripts" (OuterVolumeSpecName: "scripts") pod "89f5e6c8-c807-4ec1-80d1-7b0ac0192d38" (UID: "89f5e6c8-c807-4ec1-80d1-7b0ac0192d38"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 22 10:59:17 crc kubenswrapper[4926]: I1122 10:59:17.615988 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0667807b-dd56-4e0b-b2b7-9936b24ad975-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "0667807b-dd56-4e0b-b2b7-9936b24ad975" (UID: "0667807b-dd56-4e0b-b2b7-9936b24ad975"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 22 10:59:17 crc kubenswrapper[4926]: I1122 10:59:17.625964 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0667807b-dd56-4e0b-b2b7-9936b24ad975-config" (OuterVolumeSpecName: "config") pod "0667807b-dd56-4e0b-b2b7-9936b24ad975" (UID: "0667807b-dd56-4e0b-b2b7-9936b24ad975"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 22 10:59:17 crc kubenswrapper[4926]: I1122 10:59:17.627405 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/89f5e6c8-c807-4ec1-80d1-7b0ac0192d38-config-data" (OuterVolumeSpecName: "config-data") pod "89f5e6c8-c807-4ec1-80d1-7b0ac0192d38" (UID: "89f5e6c8-c807-4ec1-80d1-7b0ac0192d38"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 22 10:59:17 crc kubenswrapper[4926]: I1122 10:59:17.628100 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/89f5e6c8-c807-4ec1-80d1-7b0ac0192d38-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "89f5e6c8-c807-4ec1-80d1-7b0ac0192d38" (UID: "89f5e6c8-c807-4ec1-80d1-7b0ac0192d38"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 22 10:59:17 crc kubenswrapper[4926]: I1122 10:59:17.639328 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-q7gqc\" (UniqueName: \"kubernetes.io/projected/89f5e6c8-c807-4ec1-80d1-7b0ac0192d38-kube-api-access-q7gqc\") on node \"crc\" DevicePath \"\""
Nov 22 10:59:17 crc kubenswrapper[4926]: I1122 10:59:17.640121 4926 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0667807b-dd56-4e0b-b2b7-9936b24ad975-config\") on node \"crc\" DevicePath \"\""
Nov 22 10:59:17 crc kubenswrapper[4926]: I1122 10:59:17.640224 4926 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/89f5e6c8-c807-4ec1-80d1-7b0ac0192d38-scripts\") on node \"crc\" DevicePath \"\""
Nov 22 10:59:17 crc kubenswrapper[4926]: I1122 10:59:17.640290 4926 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/0667807b-dd56-4e0b-b2b7-9936b24ad975-ovsdbserver-nb\") on node \"crc\" DevicePath \"\""
Nov 22 10:59:17 crc kubenswrapper[4926]: I1122 10:59:17.640349 4926 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/89f5e6c8-c807-4ec1-80d1-7b0ac0192d38-config-data\") on node \"crc\" DevicePath \"\""
Nov 22 10:59:17 crc kubenswrapper[4926]: I1122 10:59:17.640425 4926 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/89f5e6c8-c807-4ec1-80d1-7b0ac0192d38-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 22 10:59:17 crc kubenswrapper[4926]: I1122 10:59:17.640659 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-z6plr\" (UniqueName: \"kubernetes.io/projected/0667807b-dd56-4e0b-b2b7-9936b24ad975-kube-api-access-z6plr\") on node \"crc\" DevicePath \"\""
Nov 22 10:59:17 crc kubenswrapper[4926]: I1122 10:59:17.640774 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0667807b-dd56-4e0b-b2b7-9936b24ad975-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "0667807b-dd56-4e0b-b2b7-9936b24ad975" (UID: "0667807b-dd56-4e0b-b2b7-9936b24ad975"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 22 10:59:17 crc kubenswrapper[4926]: I1122 10:59:17.640993 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0667807b-dd56-4e0b-b2b7-9936b24ad975-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "0667807b-dd56-4e0b-b2b7-9936b24ad975" (UID: "0667807b-dd56-4e0b-b2b7-9936b24ad975"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 22 10:59:17 crc kubenswrapper[4926]: I1122 10:59:17.655752 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0667807b-dd56-4e0b-b2b7-9936b24ad975-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "0667807b-dd56-4e0b-b2b7-9936b24ad975" (UID: "0667807b-dd56-4e0b-b2b7-9936b24ad975"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 22 10:59:17 crc kubenswrapper[4926]: I1122 10:59:17.742624 4926 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/0667807b-dd56-4e0b-b2b7-9936b24ad975-ovsdbserver-sb\") on node \"crc\" DevicePath \"\""
Nov 22 10:59:17 crc kubenswrapper[4926]: I1122 10:59:17.742682 4926 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/0667807b-dd56-4e0b-b2b7-9936b24ad975-dns-svc\") on node \"crc\" DevicePath \"\""
Nov 22 10:59:17 crc kubenswrapper[4926]: I1122 10:59:17.742692 4926 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/0667807b-dd56-4e0b-b2b7-9936b24ad975-dns-swift-storage-0\") on node \"crc\" DevicePath \"\""
Nov 22 10:59:17 crc kubenswrapper[4926]: I1122 10:59:17.894404 4926 generic.go:334] "Generic (PLEG): container finished" podID="8b805a48-aee9-4243-b9eb-32bb0f948260" containerID="75ee3b88cf4348a27634025661c549ee36fd6d79d54ecb642a261ceff675b977" exitCode=143
Nov 22 10:59:17 crc kubenswrapper[4926]: I1122 10:59:17.894469 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"8b805a48-aee9-4243-b9eb-32bb0f948260","Type":"ContainerDied","Data":"75ee3b88cf4348a27634025661c549ee36fd6d79d54ecb642a261ceff675b977"}
Nov 22 10:59:17 crc kubenswrapper[4926]: I1122 10:59:17.901129 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6bb4fc677f-h86xn"
Nov 22 10:59:17 crc kubenswrapper[4926]: I1122 10:59:17.902009 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6bb4fc677f-h86xn" event={"ID":"0667807b-dd56-4e0b-b2b7-9936b24ad975","Type":"ContainerDied","Data":"762abeab209ce3bfff8550418c0fd3120feff71ae72a1d38c5f8399ade849207"}
Nov 22 10:59:17 crc kubenswrapper[4926]: I1122 10:59:17.902076 4926 scope.go:117] "RemoveContainer" containerID="156380288bb350c0ba1cffa92a44e6229c6cba775887be81ba0dc3cc886efefa"
Nov 22 10:59:17 crc kubenswrapper[4926]: I1122 10:59:17.907963 4926 generic.go:334] "Generic (PLEG): container finished" podID="f5ede692-d924-48be-a7c5-b80104345b9c" containerID="da30d6c4e08a9b8cabde8f5f1bbb858dd5af689020b473d823ae4c6bae8bef09" exitCode=0
Nov 22 10:59:17 crc kubenswrapper[4926]: I1122 10:59:17.908001 4926 generic.go:334] "Generic (PLEG): container finished" podID="f5ede692-d924-48be-a7c5-b80104345b9c" containerID="ac75a2dbee16f6ef89ef43b57a95e669b9ad6506e66da77565f22a8909edd9e0" exitCode=143
Nov 22 10:59:17 crc kubenswrapper[4926]: I1122 10:59:17.908054 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"f5ede692-d924-48be-a7c5-b80104345b9c","Type":"ContainerDied","Data":"da30d6c4e08a9b8cabde8f5f1bbb858dd5af689020b473d823ae4c6bae8bef09"}
Nov 22 10:59:17 crc kubenswrapper[4926]: I1122 10:59:17.908089 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"f5ede692-d924-48be-a7c5-b80104345b9c","Type":"ContainerDied","Data":"ac75a2dbee16f6ef89ef43b57a95e669b9ad6506e66da77565f22a8909edd9e0"}
Nov 22 10:59:17 crc kubenswrapper[4926]: I1122 10:59:17.915680 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-vwr2z"
Nov 22 10:59:17 crc kubenswrapper[4926]: I1122 10:59:17.916018 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-vwr2z" event={"ID":"89f5e6c8-c807-4ec1-80d1-7b0ac0192d38","Type":"ContainerDied","Data":"373e24673adffdbfb9d486d2999f332cc654521afbd635d78dae5707dafefcaf"}
Nov 22 10:59:17 crc kubenswrapper[4926]: I1122 10:59:17.916070 4926 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="373e24673adffdbfb9d486d2999f332cc654521afbd635d78dae5707dafefcaf"
Nov 22 10:59:17 crc kubenswrapper[4926]: I1122 10:59:17.959673 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-conductor-0"]
Nov 22 10:59:17 crc kubenswrapper[4926]: E1122 10:59:17.960164 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0667807b-dd56-4e0b-b2b7-9936b24ad975" containerName="dnsmasq-dns"
Nov 22 10:59:17 crc kubenswrapper[4926]: I1122 10:59:17.960184 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="0667807b-dd56-4e0b-b2b7-9936b24ad975" containerName="dnsmasq-dns"
Nov 22 10:59:17 crc kubenswrapper[4926]: E1122 10:59:17.960205 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="89f5e6c8-c807-4ec1-80d1-7b0ac0192d38" containerName="nova-cell1-conductor-db-sync"
Nov 22 10:59:17 crc kubenswrapper[4926]: I1122 10:59:17.960213 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="89f5e6c8-c807-4ec1-80d1-7b0ac0192d38" containerName="nova-cell1-conductor-db-sync"
Nov 22 10:59:17 crc kubenswrapper[4926]: E1122 10:59:17.960232 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="85f46bca-0fff-445a-9c52-c0f4478105ff" containerName="nova-manage"
Nov 22 10:59:17 crc kubenswrapper[4926]: I1122 10:59:17.960240 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="85f46bca-0fff-445a-9c52-c0f4478105ff" containerName="nova-manage"
Nov 22 10:59:17 crc kubenswrapper[4926]: E1122 10:59:17.960255 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0667807b-dd56-4e0b-b2b7-9936b24ad975" containerName="init"
Nov 22 10:59:17 crc kubenswrapper[4926]: I1122 10:59:17.960264 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="0667807b-dd56-4e0b-b2b7-9936b24ad975" containerName="init"
Nov 22 10:59:17 crc kubenswrapper[4926]: I1122 10:59:17.960501 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="0667807b-dd56-4e0b-b2b7-9936b24ad975" containerName="dnsmasq-dns"
Nov 22 10:59:17 crc kubenswrapper[4926]: I1122 10:59:17.960521 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="89f5e6c8-c807-4ec1-80d1-7b0ac0192d38" containerName="nova-cell1-conductor-db-sync"
Nov 22 10:59:17 crc kubenswrapper[4926]: I1122 10:59:17.960530 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="85f46bca-0fff-445a-9c52-c0f4478105ff" containerName="nova-manage"
Nov 22 10:59:17 crc kubenswrapper[4926]: I1122 10:59:17.961281 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-0"
Nov 22 10:59:17 crc kubenswrapper[4926]: I1122 10:59:17.965211 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-config-data"
Nov 22 10:59:17 crc kubenswrapper[4926]: I1122 10:59:17.967704 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6bb4fc677f-h86xn"]
Nov 22 10:59:17 crc kubenswrapper[4926]: I1122 10:59:17.970666 4926 scope.go:117] "RemoveContainer" containerID="bf687a61528e0d4aacd60b4c8eb5af6fc92914a091c034f33c244f03be19d041"
Nov 22 10:59:17 crc kubenswrapper[4926]: I1122 10:59:17.981328 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-6bb4fc677f-h86xn"]
Nov 22 10:59:17 crc kubenswrapper[4926]: I1122 10:59:17.989124 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-0"]
Nov 22 10:59:18 crc kubenswrapper[4926]: I1122 10:59:18.118903 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0"
Nov 22 10:59:18 crc kubenswrapper[4926]: I1122 10:59:18.157246 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5xsd4\" (UniqueName: \"kubernetes.io/projected/cea60836-1c25-4c6c-8f9e-e64ab97d459a-kube-api-access-5xsd4\") pod \"nova-cell1-conductor-0\" (UID: \"cea60836-1c25-4c6c-8f9e-e64ab97d459a\") " pod="openstack/nova-cell1-conductor-0"
Nov 22 10:59:18 crc kubenswrapper[4926]: I1122 10:59:18.157389 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cea60836-1c25-4c6c-8f9e-e64ab97d459a-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"cea60836-1c25-4c6c-8f9e-e64ab97d459a\") " pod="openstack/nova-cell1-conductor-0"
Nov 22 10:59:18 crc kubenswrapper[4926]: I1122 10:59:18.157453 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cea60836-1c25-4c6c-8f9e-e64ab97d459a-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"cea60836-1c25-4c6c-8f9e-e64ab97d459a\") " pod="openstack/nova-cell1-conductor-0"
Nov 22 10:59:18 crc kubenswrapper[4926]: I1122 10:59:18.258590 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wq76q\" (UniqueName: \"kubernetes.io/projected/f5ede692-d924-48be-a7c5-b80104345b9c-kube-api-access-wq76q\") pod \"f5ede692-d924-48be-a7c5-b80104345b9c\" (UID: \"f5ede692-d924-48be-a7c5-b80104345b9c\") "
Nov 22 10:59:18 crc kubenswrapper[4926]: I1122 10:59:18.258627 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/f5ede692-d924-48be-a7c5-b80104345b9c-nova-metadata-tls-certs\") pod \"f5ede692-d924-48be-a7c5-b80104345b9c\" (UID: \"f5ede692-d924-48be-a7c5-b80104345b9c\") "
Nov 22 10:59:18 crc kubenswrapper[4926]: I1122 10:59:18.258808 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f5ede692-d924-48be-a7c5-b80104345b9c-logs\") pod \"f5ede692-d924-48be-a7c5-b80104345b9c\" (UID: \"f5ede692-d924-48be-a7c5-b80104345b9c\") "
Nov 22 10:59:18 crc kubenswrapper[4926]: I1122 10:59:18.258879 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f5ede692-d924-48be-a7c5-b80104345b9c-config-data\") pod \"f5ede692-d924-48be-a7c5-b80104345b9c\" (UID: \"f5ede692-d924-48be-a7c5-b80104345b9c\") "
\"kubernetes.io/secret/f5ede692-d924-48be-a7c5-b80104345b9c-config-data\") pod \"f5ede692-d924-48be-a7c5-b80104345b9c\" (UID: \"f5ede692-d924-48be-a7c5-b80104345b9c\") " Nov 22 10:59:18 crc kubenswrapper[4926]: I1122 10:59:18.258956 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f5ede692-d924-48be-a7c5-b80104345b9c-combined-ca-bundle\") pod \"f5ede692-d924-48be-a7c5-b80104345b9c\" (UID: \"f5ede692-d924-48be-a7c5-b80104345b9c\") " Nov 22 10:59:18 crc kubenswrapper[4926]: I1122 10:59:18.259225 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5xsd4\" (UniqueName: \"kubernetes.io/projected/cea60836-1c25-4c6c-8f9e-e64ab97d459a-kube-api-access-5xsd4\") pod \"nova-cell1-conductor-0\" (UID: \"cea60836-1c25-4c6c-8f9e-e64ab97d459a\") " pod="openstack/nova-cell1-conductor-0" Nov 22 10:59:18 crc kubenswrapper[4926]: I1122 10:59:18.259298 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cea60836-1c25-4c6c-8f9e-e64ab97d459a-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"cea60836-1c25-4c6c-8f9e-e64ab97d459a\") " pod="openstack/nova-cell1-conductor-0" Nov 22 10:59:18 crc kubenswrapper[4926]: I1122 10:59:18.259336 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cea60836-1c25-4c6c-8f9e-e64ab97d459a-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"cea60836-1c25-4c6c-8f9e-e64ab97d459a\") " pod="openstack/nova-cell1-conductor-0" Nov 22 10:59:18 crc kubenswrapper[4926]: I1122 10:59:18.259774 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f5ede692-d924-48be-a7c5-b80104345b9c-logs" (OuterVolumeSpecName: "logs") pod "f5ede692-d924-48be-a7c5-b80104345b9c" (UID: "f5ede692-d924-48be-a7c5-b80104345b9c"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 10:59:18 crc kubenswrapper[4926]: I1122 10:59:18.265762 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cea60836-1c25-4c6c-8f9e-e64ab97d459a-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"cea60836-1c25-4c6c-8f9e-e64ab97d459a\") " pod="openstack/nova-cell1-conductor-0" Nov 22 10:59:18 crc kubenswrapper[4926]: I1122 10:59:18.266483 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cea60836-1c25-4c6c-8f9e-e64ab97d459a-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"cea60836-1c25-4c6c-8f9e-e64ab97d459a\") " pod="openstack/nova-cell1-conductor-0" Nov 22 10:59:18 crc kubenswrapper[4926]: I1122 10:59:18.268237 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f5ede692-d924-48be-a7c5-b80104345b9c-kube-api-access-wq76q" (OuterVolumeSpecName: "kube-api-access-wq76q") pod "f5ede692-d924-48be-a7c5-b80104345b9c" (UID: "f5ede692-d924-48be-a7c5-b80104345b9c"). InnerVolumeSpecName "kube-api-access-wq76q". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:59:18 crc kubenswrapper[4926]: I1122 10:59:18.276715 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5xsd4\" (UniqueName: \"kubernetes.io/projected/cea60836-1c25-4c6c-8f9e-e64ab97d459a-kube-api-access-5xsd4\") pod \"nova-cell1-conductor-0\" (UID: \"cea60836-1c25-4c6c-8f9e-e64ab97d459a\") " pod="openstack/nova-cell1-conductor-0" Nov 22 10:59:18 crc kubenswrapper[4926]: I1122 10:59:18.295326 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-0" Nov 22 10:59:18 crc kubenswrapper[4926]: I1122 10:59:18.303688 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f5ede692-d924-48be-a7c5-b80104345b9c-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "f5ede692-d924-48be-a7c5-b80104345b9c" (UID: "f5ede692-d924-48be-a7c5-b80104345b9c"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:59:18 crc kubenswrapper[4926]: I1122 10:59:18.310190 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f5ede692-d924-48be-a7c5-b80104345b9c-nova-metadata-tls-certs" (OuterVolumeSpecName: "nova-metadata-tls-certs") pod "f5ede692-d924-48be-a7c5-b80104345b9c" (UID: "f5ede692-d924-48be-a7c5-b80104345b9c"). InnerVolumeSpecName "nova-metadata-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:59:18 crc kubenswrapper[4926]: I1122 10:59:18.310723 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f5ede692-d924-48be-a7c5-b80104345b9c-config-data" (OuterVolumeSpecName: "config-data") pod "f5ede692-d924-48be-a7c5-b80104345b9c" (UID: "f5ede692-d924-48be-a7c5-b80104345b9c"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:59:18 crc kubenswrapper[4926]: I1122 10:59:18.363924 4926 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f5ede692-d924-48be-a7c5-b80104345b9c-logs\") on node \"crc\" DevicePath \"\"" Nov 22 10:59:18 crc kubenswrapper[4926]: I1122 10:59:18.363964 4926 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f5ede692-d924-48be-a7c5-b80104345b9c-config-data\") on node \"crc\" DevicePath \"\"" Nov 22 10:59:18 crc kubenswrapper[4926]: I1122 10:59:18.363974 4926 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f5ede692-d924-48be-a7c5-b80104345b9c-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 22 10:59:18 crc kubenswrapper[4926]: I1122 10:59:18.363985 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wq76q\" (UniqueName: \"kubernetes.io/projected/f5ede692-d924-48be-a7c5-b80104345b9c-kube-api-access-wq76q\") on node \"crc\" DevicePath \"\"" Nov 22 10:59:18 crc kubenswrapper[4926]: I1122 10:59:18.363999 4926 reconciler_common.go:293] "Volume detached for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/f5ede692-d924-48be-a7c5-b80104345b9c-nova-metadata-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 22 10:59:18 crc kubenswrapper[4926]: I1122 10:59:18.601182 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0667807b-dd56-4e0b-b2b7-9936b24ad975" path="/var/lib/kubelet/pods/0667807b-dd56-4e0b-b2b7-9936b24ad975/volumes" Nov 22 10:59:18 crc kubenswrapper[4926]: I1122 10:59:18.768858 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-0"] Nov 22 10:59:18 crc kubenswrapper[4926]: I1122 10:59:18.930023 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"f5ede692-d924-48be-a7c5-b80104345b9c","Type":"ContainerDied","Data":"c3c4d70f91b4222abac22567ea6629e5536975efac496e78ab96d05a83d88c0f"} Nov 22 10:59:18 crc kubenswrapper[4926]: I1122 10:59:18.930059 4926 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Nov 22 10:59:18 crc kubenswrapper[4926]: I1122 10:59:18.930388 4926 scope.go:117] "RemoveContainer" containerID="da30d6c4e08a9b8cabde8f5f1bbb858dd5af689020b473d823ae4c6bae8bef09" Nov 22 10:59:18 crc kubenswrapper[4926]: I1122 10:59:18.933367 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"cea60836-1c25-4c6c-8f9e-e64ab97d459a","Type":"ContainerStarted","Data":"f50c5583c0382f6259bf66177faf43a2a149c2312ecf7fdbe9237298524c1f02"} Nov 22 10:59:18 crc kubenswrapper[4926]: I1122 10:59:18.937425 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-scheduler-0" podUID="05b9b91d-44f6-47d0-b509-eb056c022a0d" containerName="nova-scheduler-scheduler" containerID="cri-o://e5c1b26c0ef07a965fb09ecdb4304e0cdb238a698fd2f302da1175fb2dd5806d" gracePeriod=30 Nov 22 10:59:18 crc kubenswrapper[4926]: I1122 10:59:18.961594 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Nov 22 10:59:18 crc kubenswrapper[4926]: I1122 10:59:18.971048 4926 scope.go:117] "RemoveContainer" containerID="ac75a2dbee16f6ef89ef43b57a95e669b9ad6506e66da77565f22a8909edd9e0" Nov 22 10:59:18 crc kubenswrapper[4926]: I1122 10:59:18.993925 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-metadata-0"] Nov 22 10:59:19 crc kubenswrapper[4926]: I1122 10:59:19.002422 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Nov 22 10:59:19 crc kubenswrapper[4926]: E1122 10:59:19.002955 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f5ede692-d924-48be-a7c5-b80104345b9c" containerName="nova-metadata-metadata" Nov 22 10:59:19 crc kubenswrapper[4926]: I1122 10:59:19.002977 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="f5ede692-d924-48be-a7c5-b80104345b9c" containerName="nova-metadata-metadata" Nov 22 10:59:19 crc kubenswrapper[4926]: E1122 10:59:19.002992 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f5ede692-d924-48be-a7c5-b80104345b9c" containerName="nova-metadata-log" Nov 22 10:59:19 crc kubenswrapper[4926]: I1122 10:59:19.003001 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="f5ede692-d924-48be-a7c5-b80104345b9c" containerName="nova-metadata-log" Nov 22 10:59:19 crc kubenswrapper[4926]: I1122 10:59:19.003230 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="f5ede692-d924-48be-a7c5-b80104345b9c" containerName="nova-metadata-metadata" Nov 22 10:59:19 crc kubenswrapper[4926]: I1122 10:59:19.003258 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="f5ede692-d924-48be-a7c5-b80104345b9c" containerName="nova-metadata-log" Nov 22 10:59:19 crc kubenswrapper[4926]: I1122 10:59:19.004455 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Nov 22 10:59:19 crc kubenswrapper[4926]: I1122 10:59:19.008427 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Nov 22 10:59:19 crc kubenswrapper[4926]: I1122 10:59:19.008639 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-metadata-internal-svc" Nov 22 10:59:19 crc kubenswrapper[4926]: I1122 10:59:19.013469 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 22 10:59:19 crc kubenswrapper[4926]: I1122 10:59:19.185271 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/69ff2b4a-0e22-4cb4-9f77-71753997ff1e-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"69ff2b4a-0e22-4cb4-9f77-71753997ff1e\") " pod="openstack/nova-metadata-0" Nov 22 10:59:19 crc kubenswrapper[4926]: I1122 10:59:19.185313 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2nld7\" (UniqueName: \"kubernetes.io/projected/69ff2b4a-0e22-4cb4-9f77-71753997ff1e-kube-api-access-2nld7\") pod \"nova-metadata-0\" (UID: \"69ff2b4a-0e22-4cb4-9f77-71753997ff1e\") " pod="openstack/nova-metadata-0" Nov 22 10:59:19 crc kubenswrapper[4926]: I1122 10:59:19.185339 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/69ff2b4a-0e22-4cb4-9f77-71753997ff1e-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"69ff2b4a-0e22-4cb4-9f77-71753997ff1e\") " pod="openstack/nova-metadata-0" Nov 22 10:59:19 crc kubenswrapper[4926]: I1122 10:59:19.185411 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/69ff2b4a-0e22-4cb4-9f77-71753997ff1e-logs\") pod \"nova-metadata-0\" (UID: \"69ff2b4a-0e22-4cb4-9f77-71753997ff1e\") " pod="openstack/nova-metadata-0" Nov 22 10:59:19 crc kubenswrapper[4926]: I1122 10:59:19.185450 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/69ff2b4a-0e22-4cb4-9f77-71753997ff1e-config-data\") pod \"nova-metadata-0\" (UID: \"69ff2b4a-0e22-4cb4-9f77-71753997ff1e\") " pod="openstack/nova-metadata-0" Nov 22 10:59:19 crc kubenswrapper[4926]: I1122 10:59:19.286869 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/69ff2b4a-0e22-4cb4-9f77-71753997ff1e-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"69ff2b4a-0e22-4cb4-9f77-71753997ff1e\") " pod="openstack/nova-metadata-0" Nov 22 10:59:19 crc kubenswrapper[4926]: I1122 10:59:19.286960 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2nld7\" (UniqueName: \"kubernetes.io/projected/69ff2b4a-0e22-4cb4-9f77-71753997ff1e-kube-api-access-2nld7\") pod \"nova-metadata-0\" (UID: \"69ff2b4a-0e22-4cb4-9f77-71753997ff1e\") " pod="openstack/nova-metadata-0" Nov 22 10:59:19 crc kubenswrapper[4926]: I1122 10:59:19.287025 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/69ff2b4a-0e22-4cb4-9f77-71753997ff1e-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: 
\"69ff2b4a-0e22-4cb4-9f77-71753997ff1e\") " pod="openstack/nova-metadata-0" Nov 22 10:59:19 crc kubenswrapper[4926]: I1122 10:59:19.287138 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/69ff2b4a-0e22-4cb4-9f77-71753997ff1e-logs\") pod \"nova-metadata-0\" (UID: \"69ff2b4a-0e22-4cb4-9f77-71753997ff1e\") " pod="openstack/nova-metadata-0" Nov 22 10:59:19 crc kubenswrapper[4926]: I1122 10:59:19.287199 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/69ff2b4a-0e22-4cb4-9f77-71753997ff1e-config-data\") pod \"nova-metadata-0\" (UID: \"69ff2b4a-0e22-4cb4-9f77-71753997ff1e\") " pod="openstack/nova-metadata-0" Nov 22 10:59:19 crc kubenswrapper[4926]: I1122 10:59:19.287494 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/69ff2b4a-0e22-4cb4-9f77-71753997ff1e-logs\") pod \"nova-metadata-0\" (UID: \"69ff2b4a-0e22-4cb4-9f77-71753997ff1e\") " pod="openstack/nova-metadata-0" Nov 22 10:59:19 crc kubenswrapper[4926]: I1122 10:59:19.292349 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/69ff2b4a-0e22-4cb4-9f77-71753997ff1e-config-data\") pod \"nova-metadata-0\" (UID: \"69ff2b4a-0e22-4cb4-9f77-71753997ff1e\") " pod="openstack/nova-metadata-0" Nov 22 10:59:19 crc kubenswrapper[4926]: I1122 10:59:19.292395 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/69ff2b4a-0e22-4cb4-9f77-71753997ff1e-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"69ff2b4a-0e22-4cb4-9f77-71753997ff1e\") " pod="openstack/nova-metadata-0" Nov 22 10:59:19 crc kubenswrapper[4926]: I1122 10:59:19.294764 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/69ff2b4a-0e22-4cb4-9f77-71753997ff1e-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"69ff2b4a-0e22-4cb4-9f77-71753997ff1e\") " pod="openstack/nova-metadata-0" Nov 22 10:59:19 crc kubenswrapper[4926]: I1122 10:59:19.323238 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2nld7\" (UniqueName: \"kubernetes.io/projected/69ff2b4a-0e22-4cb4-9f77-71753997ff1e-kube-api-access-2nld7\") pod \"nova-metadata-0\" (UID: \"69ff2b4a-0e22-4cb4-9f77-71753997ff1e\") " pod="openstack/nova-metadata-0" Nov 22 10:59:19 crc kubenswrapper[4926]: I1122 10:59:19.337212 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Nov 22 10:59:19 crc kubenswrapper[4926]: W1122 10:59:19.790717 4926 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod69ff2b4a_0e22_4cb4_9f77_71753997ff1e.slice/crio-c45e7a5d243289054a3e1a2022811b7b2af3fecc2636ad6902963d52bbbc4b23 WatchSource:0}: Error finding container c45e7a5d243289054a3e1a2022811b7b2af3fecc2636ad6902963d52bbbc4b23: Status 404 returned error can't find the container with id c45e7a5d243289054a3e1a2022811b7b2af3fecc2636ad6902963d52bbbc4b23 Nov 22 10:59:19 crc kubenswrapper[4926]: I1122 10:59:19.792021 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 22 10:59:19 crc kubenswrapper[4926]: I1122 10:59:19.872542 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ceilometer-0" Nov 22 10:59:19 crc kubenswrapper[4926]: I1122 10:59:19.955557 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"cea60836-1c25-4c6c-8f9e-e64ab97d459a","Type":"ContainerStarted","Data":"ddca85b3a70636d6aa12a882252bf11e9999680b7f0147ecd0815c5a85afd31f"} Nov 22 10:59:19 crc kubenswrapper[4926]: I1122 10:59:19.956717 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-conductor-0" Nov 22 10:59:19 crc kubenswrapper[4926]: I1122 10:59:19.958197 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"69ff2b4a-0e22-4cb4-9f77-71753997ff1e","Type":"ContainerStarted","Data":"c931adbfb6ce5309935eaf16e5b39e8391d241760f7218d4d8a825ca5041554f"} Nov 22 10:59:19 crc kubenswrapper[4926]: I1122 10:59:19.958241 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"69ff2b4a-0e22-4cb4-9f77-71753997ff1e","Type":"ContainerStarted","Data":"c45e7a5d243289054a3e1a2022811b7b2af3fecc2636ad6902963d52bbbc4b23"} Nov 22 10:59:19 crc kubenswrapper[4926]: I1122 10:59:19.976045 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-conductor-0" podStartSLOduration=2.976026819 podStartE2EDuration="2.976026819s" podCreationTimestamp="2025-11-22 10:59:17 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 10:59:19.970616044 +0000 UTC m=+1180.272221321" watchObservedRunningTime="2025-11-22 10:59:19.976026819 +0000 UTC m=+1180.277632106" Nov 22 10:59:20 crc kubenswrapper[4926]: I1122 10:59:20.603691 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f5ede692-d924-48be-a7c5-b80104345b9c" path="/var/lib/kubelet/pods/f5ede692-d924-48be-a7c5-b80104345b9c/volumes" Nov 22 10:59:20 crc kubenswrapper[4926]: I1122 10:59:20.967350 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"69ff2b4a-0e22-4cb4-9f77-71753997ff1e","Type":"ContainerStarted","Data":"78c90e0d760173b471381827911256c647ae31786b71ebceffe409c8a9ec819f"} Nov 22 10:59:20 crc kubenswrapper[4926]: I1122 10:59:20.985857 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=2.985831508 podStartE2EDuration="2.985831508s" podCreationTimestamp="2025-11-22 10:59:18 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 
10:59:20.983155151 +0000 UTC m=+1181.284760448" watchObservedRunningTime="2025-11-22 10:59:20.985831508 +0000 UTC m=+1181.287436795" Nov 22 10:59:21 crc kubenswrapper[4926]: E1122 10:59:21.321691 4926 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="e5c1b26c0ef07a965fb09ecdb4304e0cdb238a698fd2f302da1175fb2dd5806d" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Nov 22 10:59:21 crc kubenswrapper[4926]: E1122 10:59:21.323772 4926 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="e5c1b26c0ef07a965fb09ecdb4304e0cdb238a698fd2f302da1175fb2dd5806d" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Nov 22 10:59:21 crc kubenswrapper[4926]: E1122 10:59:21.325872 4926 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="e5c1b26c0ef07a965fb09ecdb4304e0cdb238a698fd2f302da1175fb2dd5806d" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Nov 22 10:59:21 crc kubenswrapper[4926]: E1122 10:59:21.325913 4926 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/nova-scheduler-0" podUID="05b9b91d-44f6-47d0-b509-eb056c022a0d" containerName="nova-scheduler-scheduler" Nov 22 10:59:22 crc kubenswrapper[4926]: I1122 10:59:22.892921 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 22 10:59:22 crc kubenswrapper[4926]: I1122 10:59:22.907571 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 22 10:59:22 crc kubenswrapper[4926]: I1122 10:59:22.966956 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/05b9b91d-44f6-47d0-b509-eb056c022a0d-config-data\") pod \"05b9b91d-44f6-47d0-b509-eb056c022a0d\" (UID: \"05b9b91d-44f6-47d0-b509-eb056c022a0d\") " Nov 22 10:59:22 crc kubenswrapper[4926]: I1122 10:59:22.968920 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vc4cp\" (UniqueName: \"kubernetes.io/projected/05b9b91d-44f6-47d0-b509-eb056c022a0d-kube-api-access-vc4cp\") pod \"05b9b91d-44f6-47d0-b509-eb056c022a0d\" (UID: \"05b9b91d-44f6-47d0-b509-eb056c022a0d\") " Nov 22 10:59:22 crc kubenswrapper[4926]: I1122 10:59:22.969000 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/05b9b91d-44f6-47d0-b509-eb056c022a0d-combined-ca-bundle\") pod \"05b9b91d-44f6-47d0-b509-eb056c022a0d\" (UID: \"05b9b91d-44f6-47d0-b509-eb056c022a0d\") " Nov 22 10:59:22 crc kubenswrapper[4926]: I1122 10:59:22.982119 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/05b9b91d-44f6-47d0-b509-eb056c022a0d-kube-api-access-vc4cp" (OuterVolumeSpecName: "kube-api-access-vc4cp") pod "05b9b91d-44f6-47d0-b509-eb056c022a0d" (UID: "05b9b91d-44f6-47d0-b509-eb056c022a0d"). InnerVolumeSpecName "kube-api-access-vc4cp". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:59:23 crc kubenswrapper[4926]: I1122 10:59:23.001050 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/05b9b91d-44f6-47d0-b509-eb056c022a0d-config-data" (OuterVolumeSpecName: "config-data") pod "05b9b91d-44f6-47d0-b509-eb056c022a0d" (UID: "05b9b91d-44f6-47d0-b509-eb056c022a0d"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:59:23 crc kubenswrapper[4926]: I1122 10:59:23.002935 4926 generic.go:334] "Generic (PLEG): container finished" podID="05b9b91d-44f6-47d0-b509-eb056c022a0d" containerID="e5c1b26c0ef07a965fb09ecdb4304e0cdb238a698fd2f302da1175fb2dd5806d" exitCode=0 Nov 22 10:59:23 crc kubenswrapper[4926]: I1122 10:59:23.002952 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 22 10:59:23 crc kubenswrapper[4926]: I1122 10:59:23.003137 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"05b9b91d-44f6-47d0-b509-eb056c022a0d","Type":"ContainerDied","Data":"e5c1b26c0ef07a965fb09ecdb4304e0cdb238a698fd2f302da1175fb2dd5806d"} Nov 22 10:59:23 crc kubenswrapper[4926]: I1122 10:59:23.003229 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"05b9b91d-44f6-47d0-b509-eb056c022a0d","Type":"ContainerDied","Data":"44f98768d5e72b6253d3ba3602a4db3b40e099f729ba8079707c5e5e2c5f1bac"} Nov 22 10:59:23 crc kubenswrapper[4926]: I1122 10:59:23.003319 4926 scope.go:117] "RemoveContainer" containerID="e5c1b26c0ef07a965fb09ecdb4304e0cdb238a698fd2f302da1175fb2dd5806d" Nov 22 10:59:23 crc kubenswrapper[4926]: I1122 10:59:23.005162 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/05b9b91d-44f6-47d0-b509-eb056c022a0d-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "05b9b91d-44f6-47d0-b509-eb056c022a0d" (UID: "05b9b91d-44f6-47d0-b509-eb056c022a0d"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:59:23 crc kubenswrapper[4926]: I1122 10:59:23.006506 4926 generic.go:334] "Generic (PLEG): container finished" podID="8b805a48-aee9-4243-b9eb-32bb0f948260" containerID="70a66a7b2c251a06fae0ced9b76c216e085252f73e0c0d4b9c871e1f4a74c464" exitCode=0 Nov 22 10:59:23 crc kubenswrapper[4926]: I1122 10:59:23.006575 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"8b805a48-aee9-4243-b9eb-32bb0f948260","Type":"ContainerDied","Data":"70a66a7b2c251a06fae0ced9b76c216e085252f73e0c0d4b9c871e1f4a74c464"} Nov 22 10:59:23 crc kubenswrapper[4926]: I1122 10:59:23.006601 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"8b805a48-aee9-4243-b9eb-32bb0f948260","Type":"ContainerDied","Data":"7c9d599b6159c7fef83334f91a7ef154173a1f74f865cefeb45d3d857b0ec5b7"} Nov 22 10:59:23 crc kubenswrapper[4926]: I1122 10:59:23.006681 4926 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Nov 22 10:59:23 crc kubenswrapper[4926]: I1122 10:59:23.042576 4926 scope.go:117] "RemoveContainer" containerID="e5c1b26c0ef07a965fb09ecdb4304e0cdb238a698fd2f302da1175fb2dd5806d" Nov 22 10:59:23 crc kubenswrapper[4926]: E1122 10:59:23.043921 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e5c1b26c0ef07a965fb09ecdb4304e0cdb238a698fd2f302da1175fb2dd5806d\": container with ID starting with e5c1b26c0ef07a965fb09ecdb4304e0cdb238a698fd2f302da1175fb2dd5806d not found: ID does not exist" containerID="e5c1b26c0ef07a965fb09ecdb4304e0cdb238a698fd2f302da1175fb2dd5806d" Nov 22 10:59:23 crc kubenswrapper[4926]: I1122 10:59:23.043973 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e5c1b26c0ef07a965fb09ecdb4304e0cdb238a698fd2f302da1175fb2dd5806d"} err="failed to get container status \"e5c1b26c0ef07a965fb09ecdb4304e0cdb238a698fd2f302da1175fb2dd5806d\": rpc error: code = NotFound desc = could not find container \"e5c1b26c0ef07a965fb09ecdb4304e0cdb238a698fd2f302da1175fb2dd5806d\": container with ID starting with e5c1b26c0ef07a965fb09ecdb4304e0cdb238a698fd2f302da1175fb2dd5806d not found: ID does not exist" Nov 22 10:59:23 crc kubenswrapper[4926]: I1122 10:59:23.044000 4926 scope.go:117] "RemoveContainer" containerID="70a66a7b2c251a06fae0ced9b76c216e085252f73e0c0d4b9c871e1f4a74c464" Nov 22 10:59:23 crc kubenswrapper[4926]: I1122 10:59:23.065574 4926 scope.go:117] "RemoveContainer" containerID="75ee3b88cf4348a27634025661c549ee36fd6d79d54ecb642a261ceff675b977" Nov 22 10:59:23 crc kubenswrapper[4926]: I1122 10:59:23.071288 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8b805a48-aee9-4243-b9eb-32bb0f948260-combined-ca-bundle\") pod \"8b805a48-aee9-4243-b9eb-32bb0f948260\" (UID: \"8b805a48-aee9-4243-b9eb-32bb0f948260\") " Nov 22 10:59:23 crc kubenswrapper[4926]: I1122 10:59:23.071669 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8b805a48-aee9-4243-b9eb-32bb0f948260-config-data\") pod \"8b805a48-aee9-4243-b9eb-32bb0f948260\" (UID: \"8b805a48-aee9-4243-b9eb-32bb0f948260\") " Nov 22 10:59:23 crc kubenswrapper[4926]: I1122 10:59:23.072223 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8b805a48-aee9-4243-b9eb-32bb0f948260-logs\") pod \"8b805a48-aee9-4243-b9eb-32bb0f948260\" (UID: \"8b805a48-aee9-4243-b9eb-32bb0f948260\") " Nov 22 10:59:23 crc kubenswrapper[4926]: I1122 10:59:23.072403 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-q76sl\" (UniqueName: \"kubernetes.io/projected/8b805a48-aee9-4243-b9eb-32bb0f948260-kube-api-access-q76sl\") pod \"8b805a48-aee9-4243-b9eb-32bb0f948260\" (UID: \"8b805a48-aee9-4243-b9eb-32bb0f948260\") " Nov 22 10:59:23 crc kubenswrapper[4926]: I1122 10:59:23.073164 4926 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/05b9b91d-44f6-47d0-b509-eb056c022a0d-config-data\") on node \"crc\" DevicePath \"\"" Nov 22 10:59:23 crc kubenswrapper[4926]: I1122 10:59:23.073293 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vc4cp\" (UniqueName: 
\"kubernetes.io/projected/05b9b91d-44f6-47d0-b509-eb056c022a0d-kube-api-access-vc4cp\") on node \"crc\" DevicePath \"\"" Nov 22 10:59:23 crc kubenswrapper[4926]: I1122 10:59:23.073401 4926 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/05b9b91d-44f6-47d0-b509-eb056c022a0d-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 22 10:59:23 crc kubenswrapper[4926]: I1122 10:59:23.073301 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8b805a48-aee9-4243-b9eb-32bb0f948260-logs" (OuterVolumeSpecName: "logs") pod "8b805a48-aee9-4243-b9eb-32bb0f948260" (UID: "8b805a48-aee9-4243-b9eb-32bb0f948260"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 10:59:23 crc kubenswrapper[4926]: I1122 10:59:23.076503 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8b805a48-aee9-4243-b9eb-32bb0f948260-kube-api-access-q76sl" (OuterVolumeSpecName: "kube-api-access-q76sl") pod "8b805a48-aee9-4243-b9eb-32bb0f948260" (UID: "8b805a48-aee9-4243-b9eb-32bb0f948260"). InnerVolumeSpecName "kube-api-access-q76sl". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:59:23 crc kubenswrapper[4926]: I1122 10:59:23.089618 4926 scope.go:117] "RemoveContainer" containerID="70a66a7b2c251a06fae0ced9b76c216e085252f73e0c0d4b9c871e1f4a74c464" Nov 22 10:59:23 crc kubenswrapper[4926]: E1122 10:59:23.090112 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"70a66a7b2c251a06fae0ced9b76c216e085252f73e0c0d4b9c871e1f4a74c464\": container with ID starting with 70a66a7b2c251a06fae0ced9b76c216e085252f73e0c0d4b9c871e1f4a74c464 not found: ID does not exist" containerID="70a66a7b2c251a06fae0ced9b76c216e085252f73e0c0d4b9c871e1f4a74c464" Nov 22 10:59:23 crc kubenswrapper[4926]: I1122 10:59:23.090139 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"70a66a7b2c251a06fae0ced9b76c216e085252f73e0c0d4b9c871e1f4a74c464"} err="failed to get container status \"70a66a7b2c251a06fae0ced9b76c216e085252f73e0c0d4b9c871e1f4a74c464\": rpc error: code = NotFound desc = could not find container \"70a66a7b2c251a06fae0ced9b76c216e085252f73e0c0d4b9c871e1f4a74c464\": container with ID starting with 70a66a7b2c251a06fae0ced9b76c216e085252f73e0c0d4b9c871e1f4a74c464 not found: ID does not exist" Nov 22 10:59:23 crc kubenswrapper[4926]: I1122 10:59:23.090162 4926 scope.go:117] "RemoveContainer" containerID="75ee3b88cf4348a27634025661c549ee36fd6d79d54ecb642a261ceff675b977" Nov 22 10:59:23 crc kubenswrapper[4926]: E1122 10:59:23.090625 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"75ee3b88cf4348a27634025661c549ee36fd6d79d54ecb642a261ceff675b977\": container with ID starting with 75ee3b88cf4348a27634025661c549ee36fd6d79d54ecb642a261ceff675b977 not found: ID does not exist" containerID="75ee3b88cf4348a27634025661c549ee36fd6d79d54ecb642a261ceff675b977" Nov 22 10:59:23 crc kubenswrapper[4926]: I1122 10:59:23.090652 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"75ee3b88cf4348a27634025661c549ee36fd6d79d54ecb642a261ceff675b977"} err="failed to get container status \"75ee3b88cf4348a27634025661c549ee36fd6d79d54ecb642a261ceff675b977\": rpc error: code = NotFound desc = could not find container 
\"75ee3b88cf4348a27634025661c549ee36fd6d79d54ecb642a261ceff675b977\": container with ID starting with 75ee3b88cf4348a27634025661c549ee36fd6d79d54ecb642a261ceff675b977 not found: ID does not exist" Nov 22 10:59:23 crc kubenswrapper[4926]: I1122 10:59:23.103824 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8b805a48-aee9-4243-b9eb-32bb0f948260-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "8b805a48-aee9-4243-b9eb-32bb0f948260" (UID: "8b805a48-aee9-4243-b9eb-32bb0f948260"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:59:23 crc kubenswrapper[4926]: I1122 10:59:23.106236 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8b805a48-aee9-4243-b9eb-32bb0f948260-config-data" (OuterVolumeSpecName: "config-data") pod "8b805a48-aee9-4243-b9eb-32bb0f948260" (UID: "8b805a48-aee9-4243-b9eb-32bb0f948260"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:59:23 crc kubenswrapper[4926]: I1122 10:59:23.175130 4926 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8b805a48-aee9-4243-b9eb-32bb0f948260-logs\") on node \"crc\" DevicePath \"\"" Nov 22 10:59:23 crc kubenswrapper[4926]: I1122 10:59:23.175167 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-q76sl\" (UniqueName: \"kubernetes.io/projected/8b805a48-aee9-4243-b9eb-32bb0f948260-kube-api-access-q76sl\") on node \"crc\" DevicePath \"\"" Nov 22 10:59:23 crc kubenswrapper[4926]: I1122 10:59:23.175182 4926 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8b805a48-aee9-4243-b9eb-32bb0f948260-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 22 10:59:23 crc kubenswrapper[4926]: I1122 10:59:23.175191 4926 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8b805a48-aee9-4243-b9eb-32bb0f948260-config-data\") on node \"crc\" DevicePath \"\"" Nov 22 10:59:23 crc kubenswrapper[4926]: I1122 10:59:23.327250 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell1-conductor-0" Nov 22 10:59:23 crc kubenswrapper[4926]: I1122 10:59:23.343309 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Nov 22 10:59:23 crc kubenswrapper[4926]: I1122 10:59:23.359588 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-scheduler-0"] Nov 22 10:59:23 crc kubenswrapper[4926]: I1122 10:59:23.377033 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Nov 22 10:59:23 crc kubenswrapper[4926]: I1122 10:59:23.391930 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"] Nov 22 10:59:23 crc kubenswrapper[4926]: I1122 10:59:23.413189 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"] Nov 22 10:59:23 crc kubenswrapper[4926]: E1122 10:59:23.413683 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8b805a48-aee9-4243-b9eb-32bb0f948260" containerName="nova-api-log" Nov 22 10:59:23 crc kubenswrapper[4926]: I1122 10:59:23.413710 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="8b805a48-aee9-4243-b9eb-32bb0f948260" containerName="nova-api-log" Nov 22 10:59:23 crc kubenswrapper[4926]: E1122 10:59:23.413735 4926 cpu_manager.go:410] "RemoveStaleState: 
removing container" podUID="05b9b91d-44f6-47d0-b509-eb056c022a0d" containerName="nova-scheduler-scheduler" Nov 22 10:59:23 crc kubenswrapper[4926]: I1122 10:59:23.413743 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="05b9b91d-44f6-47d0-b509-eb056c022a0d" containerName="nova-scheduler-scheduler" Nov 22 10:59:23 crc kubenswrapper[4926]: E1122 10:59:23.413774 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8b805a48-aee9-4243-b9eb-32bb0f948260" containerName="nova-api-api" Nov 22 10:59:23 crc kubenswrapper[4926]: I1122 10:59:23.413783 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="8b805a48-aee9-4243-b9eb-32bb0f948260" containerName="nova-api-api" Nov 22 10:59:23 crc kubenswrapper[4926]: I1122 10:59:23.414006 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="8b805a48-aee9-4243-b9eb-32bb0f948260" containerName="nova-api-api" Nov 22 10:59:23 crc kubenswrapper[4926]: I1122 10:59:23.414023 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="05b9b91d-44f6-47d0-b509-eb056c022a0d" containerName="nova-scheduler-scheduler" Nov 22 10:59:23 crc kubenswrapper[4926]: I1122 10:59:23.414039 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="8b805a48-aee9-4243-b9eb-32bb0f948260" containerName="nova-api-log" Nov 22 10:59:23 crc kubenswrapper[4926]: I1122 10:59:23.414814 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 22 10:59:23 crc kubenswrapper[4926]: I1122 10:59:23.417406 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data" Nov 22 10:59:23 crc kubenswrapper[4926]: I1122 10:59:23.424339 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Nov 22 10:59:23 crc kubenswrapper[4926]: I1122 10:59:23.425971 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Nov 22 10:59:23 crc kubenswrapper[4926]: I1122 10:59:23.429647 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Nov 22 10:59:23 crc kubenswrapper[4926]: I1122 10:59:23.442596 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Nov 22 10:59:23 crc kubenswrapper[4926]: I1122 10:59:23.450163 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 22 10:59:23 crc kubenswrapper[4926]: I1122 10:59:23.582526 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f442b1e7-127c-4808-b6c0-57a419fc276b-config-data\") pod \"nova-scheduler-0\" (UID: \"f442b1e7-127c-4808-b6c0-57a419fc276b\") " pod="openstack/nova-scheduler-0" Nov 22 10:59:23 crc kubenswrapper[4926]: I1122 10:59:23.583129 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wnxvq\" (UniqueName: \"kubernetes.io/projected/f442b1e7-127c-4808-b6c0-57a419fc276b-kube-api-access-wnxvq\") pod \"nova-scheduler-0\" (UID: \"f442b1e7-127c-4808-b6c0-57a419fc276b\") " pod="openstack/nova-scheduler-0" Nov 22 10:59:23 crc kubenswrapper[4926]: I1122 10:59:23.583285 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c44886b1-1d10-4c9a-b13e-9e53d75e978b-config-data\") pod \"nova-api-0\" (UID: \"c44886b1-1d10-4c9a-b13e-9e53d75e978b\") " pod="openstack/nova-api-0" Nov 22 10:59:23 crc kubenswrapper[4926]: I1122 10:59:23.583463 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c44886b1-1d10-4c9a-b13e-9e53d75e978b-logs\") pod \"nova-api-0\" (UID: \"c44886b1-1d10-4c9a-b13e-9e53d75e978b\") " pod="openstack/nova-api-0" Nov 22 10:59:23 crc kubenswrapper[4926]: I1122 10:59:23.583680 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c44886b1-1d10-4c9a-b13e-9e53d75e978b-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"c44886b1-1d10-4c9a-b13e-9e53d75e978b\") " pod="openstack/nova-api-0" Nov 22 10:59:23 crc kubenswrapper[4926]: I1122 10:59:23.583853 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-96jv6\" (UniqueName: \"kubernetes.io/projected/c44886b1-1d10-4c9a-b13e-9e53d75e978b-kube-api-access-96jv6\") pod \"nova-api-0\" (UID: \"c44886b1-1d10-4c9a-b13e-9e53d75e978b\") " pod="openstack/nova-api-0" Nov 22 10:59:23 crc kubenswrapper[4926]: I1122 10:59:23.584077 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f442b1e7-127c-4808-b6c0-57a419fc276b-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"f442b1e7-127c-4808-b6c0-57a419fc276b\") " pod="openstack/nova-scheduler-0" Nov 22 10:59:23 crc kubenswrapper[4926]: I1122 10:59:23.640856 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 22 10:59:23 crc kubenswrapper[4926]: I1122 10:59:23.641122 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/kube-state-metrics-0" podUID="23355e5d-b81e-47b5-ac62-ee7b22c33708" 
containerName="kube-state-metrics" containerID="cri-o://adc75b77dd0d66b919725530cfd81ad0eb550544613815b661b1ef464620d1c5" gracePeriod=30 Nov 22 10:59:23 crc kubenswrapper[4926]: I1122 10:59:23.685217 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c44886b1-1d10-4c9a-b13e-9e53d75e978b-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"c44886b1-1d10-4c9a-b13e-9e53d75e978b\") " pod="openstack/nova-api-0" Nov 22 10:59:23 crc kubenswrapper[4926]: I1122 10:59:23.685272 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-96jv6\" (UniqueName: \"kubernetes.io/projected/c44886b1-1d10-4c9a-b13e-9e53d75e978b-kube-api-access-96jv6\") pod \"nova-api-0\" (UID: \"c44886b1-1d10-4c9a-b13e-9e53d75e978b\") " pod="openstack/nova-api-0" Nov 22 10:59:23 crc kubenswrapper[4926]: I1122 10:59:23.685315 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f442b1e7-127c-4808-b6c0-57a419fc276b-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"f442b1e7-127c-4808-b6c0-57a419fc276b\") " pod="openstack/nova-scheduler-0" Nov 22 10:59:23 crc kubenswrapper[4926]: I1122 10:59:23.685349 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f442b1e7-127c-4808-b6c0-57a419fc276b-config-data\") pod \"nova-scheduler-0\" (UID: \"f442b1e7-127c-4808-b6c0-57a419fc276b\") " pod="openstack/nova-scheduler-0" Nov 22 10:59:23 crc kubenswrapper[4926]: I1122 10:59:23.685391 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wnxvq\" (UniqueName: \"kubernetes.io/projected/f442b1e7-127c-4808-b6c0-57a419fc276b-kube-api-access-wnxvq\") pod \"nova-scheduler-0\" (UID: \"f442b1e7-127c-4808-b6c0-57a419fc276b\") " pod="openstack/nova-scheduler-0" Nov 22 10:59:23 crc kubenswrapper[4926]: I1122 10:59:23.685413 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c44886b1-1d10-4c9a-b13e-9e53d75e978b-config-data\") pod \"nova-api-0\" (UID: \"c44886b1-1d10-4c9a-b13e-9e53d75e978b\") " pod="openstack/nova-api-0" Nov 22 10:59:23 crc kubenswrapper[4926]: I1122 10:59:23.685444 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c44886b1-1d10-4c9a-b13e-9e53d75e978b-logs\") pod \"nova-api-0\" (UID: \"c44886b1-1d10-4c9a-b13e-9e53d75e978b\") " pod="openstack/nova-api-0" Nov 22 10:59:23 crc kubenswrapper[4926]: I1122 10:59:23.685918 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c44886b1-1d10-4c9a-b13e-9e53d75e978b-logs\") pod \"nova-api-0\" (UID: \"c44886b1-1d10-4c9a-b13e-9e53d75e978b\") " pod="openstack/nova-api-0" Nov 22 10:59:23 crc kubenswrapper[4926]: I1122 10:59:23.691461 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c44886b1-1d10-4c9a-b13e-9e53d75e978b-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"c44886b1-1d10-4c9a-b13e-9e53d75e978b\") " pod="openstack/nova-api-0" Nov 22 10:59:23 crc kubenswrapper[4926]: I1122 10:59:23.691472 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: 
\"kubernetes.io/secret/f442b1e7-127c-4808-b6c0-57a419fc276b-config-data\") pod \"nova-scheduler-0\" (UID: \"f442b1e7-127c-4808-b6c0-57a419fc276b\") " pod="openstack/nova-scheduler-0" Nov 22 10:59:23 crc kubenswrapper[4926]: I1122 10:59:23.691589 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c44886b1-1d10-4c9a-b13e-9e53d75e978b-config-data\") pod \"nova-api-0\" (UID: \"c44886b1-1d10-4c9a-b13e-9e53d75e978b\") " pod="openstack/nova-api-0" Nov 22 10:59:23 crc kubenswrapper[4926]: I1122 10:59:23.693045 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f442b1e7-127c-4808-b6c0-57a419fc276b-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"f442b1e7-127c-4808-b6c0-57a419fc276b\") " pod="openstack/nova-scheduler-0" Nov 22 10:59:23 crc kubenswrapper[4926]: I1122 10:59:23.706343 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-96jv6\" (UniqueName: \"kubernetes.io/projected/c44886b1-1d10-4c9a-b13e-9e53d75e978b-kube-api-access-96jv6\") pod \"nova-api-0\" (UID: \"c44886b1-1d10-4c9a-b13e-9e53d75e978b\") " pod="openstack/nova-api-0" Nov 22 10:59:23 crc kubenswrapper[4926]: I1122 10:59:23.706872 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wnxvq\" (UniqueName: \"kubernetes.io/projected/f442b1e7-127c-4808-b6c0-57a419fc276b-kube-api-access-wnxvq\") pod \"nova-scheduler-0\" (UID: \"f442b1e7-127c-4808-b6c0-57a419fc276b\") " pod="openstack/nova-scheduler-0" Nov 22 10:59:23 crc kubenswrapper[4926]: I1122 10:59:23.811326 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 22 10:59:23 crc kubenswrapper[4926]: I1122 10:59:23.819301 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 22 10:59:24 crc kubenswrapper[4926]: I1122 10:59:24.028922 4926 generic.go:334] "Generic (PLEG): container finished" podID="23355e5d-b81e-47b5-ac62-ee7b22c33708" containerID="adc75b77dd0d66b919725530cfd81ad0eb550544613815b661b1ef464620d1c5" exitCode=2 Nov 22 10:59:24 crc kubenswrapper[4926]: I1122 10:59:24.028982 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"23355e5d-b81e-47b5-ac62-ee7b22c33708","Type":"ContainerDied","Data":"adc75b77dd0d66b919725530cfd81ad0eb550544613815b661b1ef464620d1c5"} Nov 22 10:59:24 crc kubenswrapper[4926]: I1122 10:59:24.089298 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Nov 22 10:59:24 crc kubenswrapper[4926]: I1122 10:59:24.198312 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7l85v\" (UniqueName: \"kubernetes.io/projected/23355e5d-b81e-47b5-ac62-ee7b22c33708-kube-api-access-7l85v\") pod \"23355e5d-b81e-47b5-ac62-ee7b22c33708\" (UID: \"23355e5d-b81e-47b5-ac62-ee7b22c33708\") " Nov 22 10:59:24 crc kubenswrapper[4926]: I1122 10:59:24.204618 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/23355e5d-b81e-47b5-ac62-ee7b22c33708-kube-api-access-7l85v" (OuterVolumeSpecName: "kube-api-access-7l85v") pod "23355e5d-b81e-47b5-ac62-ee7b22c33708" (UID: "23355e5d-b81e-47b5-ac62-ee7b22c33708"). InnerVolumeSpecName "kube-api-access-7l85v". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:59:24 crc kubenswrapper[4926]: I1122 10:59:24.301326 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7l85v\" (UniqueName: \"kubernetes.io/projected/23355e5d-b81e-47b5-ac62-ee7b22c33708-kube-api-access-7l85v\") on node \"crc\" DevicePath \"\"" Nov 22 10:59:24 crc kubenswrapper[4926]: I1122 10:59:24.337680 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Nov 22 10:59:24 crc kubenswrapper[4926]: I1122 10:59:24.337799 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Nov 22 10:59:24 crc kubenswrapper[4926]: I1122 10:59:24.372988 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Nov 22 10:59:24 crc kubenswrapper[4926]: W1122 10:59:24.377395 4926 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc44886b1_1d10_4c9a_b13e_9e53d75e978b.slice/crio-e543841382afad84aa8eb5fea36bd5642fd0aafd4a031e898bb7553cd9702a3d WatchSource:0}: Error finding container e543841382afad84aa8eb5fea36bd5642fd0aafd4a031e898bb7553cd9702a3d: Status 404 returned error can't find the container with id e543841382afad84aa8eb5fea36bd5642fd0aafd4a031e898bb7553cd9702a3d Nov 22 10:59:24 crc kubenswrapper[4926]: I1122 10:59:24.383528 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 22 10:59:24 crc kubenswrapper[4926]: I1122 10:59:24.604703 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="05b9b91d-44f6-47d0-b509-eb056c022a0d" path="/var/lib/kubelet/pods/05b9b91d-44f6-47d0-b509-eb056c022a0d/volumes" Nov 22 10:59:24 crc kubenswrapper[4926]: I1122 10:59:24.605733 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8b805a48-aee9-4243-b9eb-32bb0f948260" path="/var/lib/kubelet/pods/8b805a48-aee9-4243-b9eb-32bb0f948260/volumes" Nov 22 10:59:25 crc kubenswrapper[4926]: I1122 10:59:25.049744 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"f442b1e7-127c-4808-b6c0-57a419fc276b","Type":"ContainerStarted","Data":"7b69868baac4f2ea119bf1cb8ce58ab3d05ed831b82829d98dc2ebe6c044c393"} Nov 22 10:59:25 crc kubenswrapper[4926]: I1122 10:59:25.050014 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"f442b1e7-127c-4808-b6c0-57a419fc276b","Type":"ContainerStarted","Data":"8dd601b41febc5af968d0b0e351a9cc1fa354d68e0dd6d97b8372109994333cf"} Nov 22 10:59:25 crc kubenswrapper[4926]: I1122 10:59:25.059072 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"c44886b1-1d10-4c9a-b13e-9e53d75e978b","Type":"ContainerStarted","Data":"aea53ef6e98a5aeb4198c93e7ecb1c2f0d4fb3ab7c9704c5b9b9723337330396"} Nov 22 10:59:25 crc kubenswrapper[4926]: I1122 10:59:25.059111 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"c44886b1-1d10-4c9a-b13e-9e53d75e978b","Type":"ContainerStarted","Data":"e543841382afad84aa8eb5fea36bd5642fd0aafd4a031e898bb7553cd9702a3d"} Nov 22 10:59:25 crc kubenswrapper[4926]: I1122 10:59:25.060800 4926 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/kube-state-metrics-0" Nov 22 10:59:25 crc kubenswrapper[4926]: I1122 10:59:25.060839 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"23355e5d-b81e-47b5-ac62-ee7b22c33708","Type":"ContainerDied","Data":"f9e2e3d3a31330d06cf229cf68801aa045e4a9d244a781e87a7a8b8aa1a6b5cf"} Nov 22 10:59:25 crc kubenswrapper[4926]: I1122 10:59:25.060867 4926 scope.go:117] "RemoveContainer" containerID="adc75b77dd0d66b919725530cfd81ad0eb550544613815b661b1ef464620d1c5" Nov 22 10:59:25 crc kubenswrapper[4926]: I1122 10:59:25.073691 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-scheduler-0" podStartSLOduration=2.073671257 podStartE2EDuration="2.073671257s" podCreationTimestamp="2025-11-22 10:59:23 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 10:59:25.062827296 +0000 UTC m=+1185.364432593" watchObservedRunningTime="2025-11-22 10:59:25.073671257 +0000 UTC m=+1185.375276554" Nov 22 10:59:25 crc kubenswrapper[4926]: I1122 10:59:25.098226 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 22 10:59:25 crc kubenswrapper[4926]: I1122 10:59:25.116437 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 22 10:59:25 crc kubenswrapper[4926]: I1122 10:59:25.126932 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/kube-state-metrics-0"] Nov 22 10:59:25 crc kubenswrapper[4926]: E1122 10:59:25.127333 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="23355e5d-b81e-47b5-ac62-ee7b22c33708" containerName="kube-state-metrics" Nov 22 10:59:25 crc kubenswrapper[4926]: I1122 10:59:25.127350 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="23355e5d-b81e-47b5-ac62-ee7b22c33708" containerName="kube-state-metrics" Nov 22 10:59:25 crc kubenswrapper[4926]: I1122 10:59:25.127550 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="23355e5d-b81e-47b5-ac62-ee7b22c33708" containerName="kube-state-metrics" Nov 22 10:59:25 crc kubenswrapper[4926]: I1122 10:59:25.128287 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/kube-state-metrics-0" Nov 22 10:59:25 crc kubenswrapper[4926]: I1122 10:59:25.133205 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"kube-state-metrics-tls-config" Nov 22 10:59:25 crc kubenswrapper[4926]: I1122 10:59:25.133791 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-kube-state-metrics-svc" Nov 22 10:59:25 crc kubenswrapper[4926]: I1122 10:59:25.135332 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 22 10:59:25 crc kubenswrapper[4926]: I1122 10:59:25.218955 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/a5fc01d6-133f-4899-926b-3e4ff8c68f0b-kube-state-metrics-tls-certs\") pod \"kube-state-metrics-0\" (UID: \"a5fc01d6-133f-4899-926b-3e4ff8c68f0b\") " pod="openstack/kube-state-metrics-0" Nov 22 10:59:25 crc kubenswrapper[4926]: I1122 10:59:25.219044 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sbjbm\" (UniqueName: \"kubernetes.io/projected/a5fc01d6-133f-4899-926b-3e4ff8c68f0b-kube-api-access-sbjbm\") pod \"kube-state-metrics-0\" (UID: \"a5fc01d6-133f-4899-926b-3e4ff8c68f0b\") " pod="openstack/kube-state-metrics-0" Nov 22 10:59:25 crc kubenswrapper[4926]: I1122 10:59:25.219066 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a5fc01d6-133f-4899-926b-3e4ff8c68f0b-combined-ca-bundle\") pod \"kube-state-metrics-0\" (UID: \"a5fc01d6-133f-4899-926b-3e4ff8c68f0b\") " pod="openstack/kube-state-metrics-0" Nov 22 10:59:25 crc kubenswrapper[4926]: I1122 10:59:25.219419 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/a5fc01d6-133f-4899-926b-3e4ff8c68f0b-kube-state-metrics-tls-config\") pod \"kube-state-metrics-0\" (UID: \"a5fc01d6-133f-4899-926b-3e4ff8c68f0b\") " pod="openstack/kube-state-metrics-0" Nov 22 10:59:25 crc kubenswrapper[4926]: I1122 10:59:25.320733 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sbjbm\" (UniqueName: \"kubernetes.io/projected/a5fc01d6-133f-4899-926b-3e4ff8c68f0b-kube-api-access-sbjbm\") pod \"kube-state-metrics-0\" (UID: \"a5fc01d6-133f-4899-926b-3e4ff8c68f0b\") " pod="openstack/kube-state-metrics-0" Nov 22 10:59:25 crc kubenswrapper[4926]: I1122 10:59:25.321015 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a5fc01d6-133f-4899-926b-3e4ff8c68f0b-combined-ca-bundle\") pod \"kube-state-metrics-0\" (UID: \"a5fc01d6-133f-4899-926b-3e4ff8c68f0b\") " pod="openstack/kube-state-metrics-0" Nov 22 10:59:25 crc kubenswrapper[4926]: I1122 10:59:25.321157 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/a5fc01d6-133f-4899-926b-3e4ff8c68f0b-kube-state-metrics-tls-config\") pod \"kube-state-metrics-0\" (UID: \"a5fc01d6-133f-4899-926b-3e4ff8c68f0b\") " pod="openstack/kube-state-metrics-0" Nov 22 10:59:25 crc kubenswrapper[4926]: I1122 10:59:25.321496 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-state-metrics-tls-certs\" 
(UniqueName: \"kubernetes.io/secret/a5fc01d6-133f-4899-926b-3e4ff8c68f0b-kube-state-metrics-tls-certs\") pod \"kube-state-metrics-0\" (UID: \"a5fc01d6-133f-4899-926b-3e4ff8c68f0b\") " pod="openstack/kube-state-metrics-0" Nov 22 10:59:25 crc kubenswrapper[4926]: I1122 10:59:25.325784 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/a5fc01d6-133f-4899-926b-3e4ff8c68f0b-kube-state-metrics-tls-config\") pod \"kube-state-metrics-0\" (UID: \"a5fc01d6-133f-4899-926b-3e4ff8c68f0b\") " pod="openstack/kube-state-metrics-0" Nov 22 10:59:25 crc kubenswrapper[4926]: I1122 10:59:25.333199 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a5fc01d6-133f-4899-926b-3e4ff8c68f0b-combined-ca-bundle\") pod \"kube-state-metrics-0\" (UID: \"a5fc01d6-133f-4899-926b-3e4ff8c68f0b\") " pod="openstack/kube-state-metrics-0" Nov 22 10:59:25 crc kubenswrapper[4926]: I1122 10:59:25.338856 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/a5fc01d6-133f-4899-926b-3e4ff8c68f0b-kube-state-metrics-tls-certs\") pod \"kube-state-metrics-0\" (UID: \"a5fc01d6-133f-4899-926b-3e4ff8c68f0b\") " pod="openstack/kube-state-metrics-0" Nov 22 10:59:25 crc kubenswrapper[4926]: I1122 10:59:25.345478 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sbjbm\" (UniqueName: \"kubernetes.io/projected/a5fc01d6-133f-4899-926b-3e4ff8c68f0b-kube-api-access-sbjbm\") pod \"kube-state-metrics-0\" (UID: \"a5fc01d6-133f-4899-926b-3e4ff8c68f0b\") " pod="openstack/kube-state-metrics-0" Nov 22 10:59:25 crc kubenswrapper[4926]: I1122 10:59:25.448395 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/kube-state-metrics-0" Nov 22 10:59:25 crc kubenswrapper[4926]: I1122 10:59:25.520801 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 22 10:59:25 crc kubenswrapper[4926]: I1122 10:59:25.521199 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="cf163d93-140e-4b0d-bcc4-9fffee4712b3" containerName="ceilometer-notification-agent" containerID="cri-o://61802681c2f9ee25f8c4034e975d4cd836b40dec1c3983d1c537547a11e9d502" gracePeriod=30 Nov 22 10:59:25 crc kubenswrapper[4926]: I1122 10:59:25.521205 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="cf163d93-140e-4b0d-bcc4-9fffee4712b3" containerName="proxy-httpd" containerID="cri-o://714c95772f9ce0959495da8e9f02baf2d62cbb84fc3b9650167aa4c2bbdaafd9" gracePeriod=30 Nov 22 10:59:25 crc kubenswrapper[4926]: I1122 10:59:25.521208 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="cf163d93-140e-4b0d-bcc4-9fffee4712b3" containerName="sg-core" containerID="cri-o://04355b4470a06df0bcb09ee91fd635d51b5c37bfd3192b5c8eb2d7cc87fe1fc2" gracePeriod=30 Nov 22 10:59:25 crc kubenswrapper[4926]: I1122 10:59:25.521631 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="cf163d93-140e-4b0d-bcc4-9fffee4712b3" containerName="ceilometer-central-agent" containerID="cri-o://ade90dbaf56b612ec437f180ec7641294939e95d53418b7ff3cf57c2d44af36b" gracePeriod=30 Nov 22 10:59:25 crc kubenswrapper[4926]: I1122 10:59:25.948659 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 22 10:59:25 crc kubenswrapper[4926]: W1122 10:59:25.957496 4926 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda5fc01d6_133f_4899_926b_3e4ff8c68f0b.slice/crio-837cc46a80bead0e4df47da08826d2336d4ba3806886b993df2dcda77c7c0395 WatchSource:0}: Error finding container 837cc46a80bead0e4df47da08826d2336d4ba3806886b993df2dcda77c7c0395: Status 404 returned error can't find the container with id 837cc46a80bead0e4df47da08826d2336d4ba3806886b993df2dcda77c7c0395 Nov 22 10:59:26 crc kubenswrapper[4926]: I1122 10:59:26.073316 4926 generic.go:334] "Generic (PLEG): container finished" podID="cf163d93-140e-4b0d-bcc4-9fffee4712b3" containerID="714c95772f9ce0959495da8e9f02baf2d62cbb84fc3b9650167aa4c2bbdaafd9" exitCode=0 Nov 22 10:59:26 crc kubenswrapper[4926]: I1122 10:59:26.073392 4926 generic.go:334] "Generic (PLEG): container finished" podID="cf163d93-140e-4b0d-bcc4-9fffee4712b3" containerID="04355b4470a06df0bcb09ee91fd635d51b5c37bfd3192b5c8eb2d7cc87fe1fc2" exitCode=2 Nov 22 10:59:26 crc kubenswrapper[4926]: I1122 10:59:26.073437 4926 generic.go:334] "Generic (PLEG): container finished" podID="cf163d93-140e-4b0d-bcc4-9fffee4712b3" containerID="ade90dbaf56b612ec437f180ec7641294939e95d53418b7ff3cf57c2d44af36b" exitCode=0 Nov 22 10:59:26 crc kubenswrapper[4926]: I1122 10:59:26.073533 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"cf163d93-140e-4b0d-bcc4-9fffee4712b3","Type":"ContainerDied","Data":"714c95772f9ce0959495da8e9f02baf2d62cbb84fc3b9650167aa4c2bbdaafd9"} Nov 22 10:59:26 crc kubenswrapper[4926]: I1122 10:59:26.073565 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" 
event={"ID":"cf163d93-140e-4b0d-bcc4-9fffee4712b3","Type":"ContainerDied","Data":"04355b4470a06df0bcb09ee91fd635d51b5c37bfd3192b5c8eb2d7cc87fe1fc2"} Nov 22 10:59:26 crc kubenswrapper[4926]: I1122 10:59:26.073600 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"cf163d93-140e-4b0d-bcc4-9fffee4712b3","Type":"ContainerDied","Data":"ade90dbaf56b612ec437f180ec7641294939e95d53418b7ff3cf57c2d44af36b"} Nov 22 10:59:26 crc kubenswrapper[4926]: I1122 10:59:26.076127 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"a5fc01d6-133f-4899-926b-3e4ff8c68f0b","Type":"ContainerStarted","Data":"837cc46a80bead0e4df47da08826d2336d4ba3806886b993df2dcda77c7c0395"} Nov 22 10:59:26 crc kubenswrapper[4926]: I1122 10:59:26.078119 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"c44886b1-1d10-4c9a-b13e-9e53d75e978b","Type":"ContainerStarted","Data":"276bd1d32e8322dc979982495afa86b51d567c604dc4c9f074b7ffdc47e6ee36"} Nov 22 10:59:26 crc kubenswrapper[4926]: I1122 10:59:26.102426 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=3.102410939 podStartE2EDuration="3.102410939s" podCreationTimestamp="2025-11-22 10:59:23 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 10:59:26.100264937 +0000 UTC m=+1186.401870224" watchObservedRunningTime="2025-11-22 10:59:26.102410939 +0000 UTC m=+1186.404016226" Nov 22 10:59:26 crc kubenswrapper[4926]: I1122 10:59:26.594680 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="23355e5d-b81e-47b5-ac62-ee7b22c33708" path="/var/lib/kubelet/pods/23355e5d-b81e-47b5-ac62-ee7b22c33708/volumes" Nov 22 10:59:27 crc kubenswrapper[4926]: I1122 10:59:27.093118 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"a5fc01d6-133f-4899-926b-3e4ff8c68f0b","Type":"ContainerStarted","Data":"6df383187595195e2d3b8281ca37cbf849bf969a737156f921e3e6b9e166e743"} Nov 22 10:59:27 crc kubenswrapper[4926]: I1122 10:59:27.093441 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/kube-state-metrics-0" Nov 22 10:59:27 crc kubenswrapper[4926]: I1122 10:59:27.120762 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/kube-state-metrics-0" podStartSLOduration=1.36753806 podStartE2EDuration="2.120744591s" podCreationTimestamp="2025-11-22 10:59:25 +0000 UTC" firstStartedPulling="2025-11-22 10:59:25.959845492 +0000 UTC m=+1186.261450779" lastFinishedPulling="2025-11-22 10:59:26.713052023 +0000 UTC m=+1187.014657310" observedRunningTime="2025-11-22 10:59:27.115975834 +0000 UTC m=+1187.417581121" watchObservedRunningTime="2025-11-22 10:59:27.120744591 +0000 UTC m=+1187.422349878" Nov 22 10:59:28 crc kubenswrapper[4926]: I1122 10:59:28.812616 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0" Nov 22 10:59:29 crc kubenswrapper[4926]: I1122 10:59:29.338072 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Nov 22 10:59:29 crc kubenswrapper[4926]: I1122 10:59:29.338144 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Nov 22 10:59:30 crc kubenswrapper[4926]: I1122 10:59:30.112984 4926 util.go:48] "No ready sandbox for 
pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 22 10:59:30 crc kubenswrapper[4926]: I1122 10:59:30.124381 4926 generic.go:334] "Generic (PLEG): container finished" podID="cf163d93-140e-4b0d-bcc4-9fffee4712b3" containerID="61802681c2f9ee25f8c4034e975d4cd836b40dec1c3983d1c537547a11e9d502" exitCode=0 Nov 22 10:59:30 crc kubenswrapper[4926]: I1122 10:59:30.124434 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"cf163d93-140e-4b0d-bcc4-9fffee4712b3","Type":"ContainerDied","Data":"61802681c2f9ee25f8c4034e975d4cd836b40dec1c3983d1c537547a11e9d502"} Nov 22 10:59:30 crc kubenswrapper[4926]: I1122 10:59:30.124501 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"cf163d93-140e-4b0d-bcc4-9fffee4712b3","Type":"ContainerDied","Data":"4c7382f67b374fff8bfed644be89d9a850137bb6f8104e8d044fe866525f2523"} Nov 22 10:59:30 crc kubenswrapper[4926]: I1122 10:59:30.124528 4926 scope.go:117] "RemoveContainer" containerID="714c95772f9ce0959495da8e9f02baf2d62cbb84fc3b9650167aa4c2bbdaafd9" Nov 22 10:59:30 crc kubenswrapper[4926]: I1122 10:59:30.124472 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 22 10:59:30 crc kubenswrapper[4926]: I1122 10:59:30.173356 4926 scope.go:117] "RemoveContainer" containerID="04355b4470a06df0bcb09ee91fd635d51b5c37bfd3192b5c8eb2d7cc87fe1fc2" Nov 22 10:59:30 crc kubenswrapper[4926]: I1122 10:59:30.199442 4926 scope.go:117] "RemoveContainer" containerID="61802681c2f9ee25f8c4034e975d4cd836b40dec1c3983d1c537547a11e9d502" Nov 22 10:59:30 crc kubenswrapper[4926]: I1122 10:59:30.217024 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/cf163d93-140e-4b0d-bcc4-9fffee4712b3-log-httpd\") pod \"cf163d93-140e-4b0d-bcc4-9fffee4712b3\" (UID: \"cf163d93-140e-4b0d-bcc4-9fffee4712b3\") " Nov 22 10:59:30 crc kubenswrapper[4926]: I1122 10:59:30.217562 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/cf163d93-140e-4b0d-bcc4-9fffee4712b3-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "cf163d93-140e-4b0d-bcc4-9fffee4712b3" (UID: "cf163d93-140e-4b0d-bcc4-9fffee4712b3"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 10:59:30 crc kubenswrapper[4926]: I1122 10:59:30.217743 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cf163d93-140e-4b0d-bcc4-9fffee4712b3-config-data\") pod \"cf163d93-140e-4b0d-bcc4-9fffee4712b3\" (UID: \"cf163d93-140e-4b0d-bcc4-9fffee4712b3\") " Nov 22 10:59:30 crc kubenswrapper[4926]: I1122 10:59:30.218315 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/cf163d93-140e-4b0d-bcc4-9fffee4712b3-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "cf163d93-140e-4b0d-bcc4-9fffee4712b3" (UID: "cf163d93-140e-4b0d-bcc4-9fffee4712b3"). InnerVolumeSpecName "run-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 10:59:30 crc kubenswrapper[4926]: I1122 10:59:30.217777 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/cf163d93-140e-4b0d-bcc4-9fffee4712b3-run-httpd\") pod \"cf163d93-140e-4b0d-bcc4-9fffee4712b3\" (UID: \"cf163d93-140e-4b0d-bcc4-9fffee4712b3\") " Nov 22 10:59:30 crc kubenswrapper[4926]: I1122 10:59:30.218944 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cf163d93-140e-4b0d-bcc4-9fffee4712b3-combined-ca-bundle\") pod \"cf163d93-140e-4b0d-bcc4-9fffee4712b3\" (UID: \"cf163d93-140e-4b0d-bcc4-9fffee4712b3\") " Nov 22 10:59:30 crc kubenswrapper[4926]: I1122 10:59:30.219047 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/cf163d93-140e-4b0d-bcc4-9fffee4712b3-sg-core-conf-yaml\") pod \"cf163d93-140e-4b0d-bcc4-9fffee4712b3\" (UID: \"cf163d93-140e-4b0d-bcc4-9fffee4712b3\") " Nov 22 10:59:30 crc kubenswrapper[4926]: I1122 10:59:30.219082 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/cf163d93-140e-4b0d-bcc4-9fffee4712b3-scripts\") pod \"cf163d93-140e-4b0d-bcc4-9fffee4712b3\" (UID: \"cf163d93-140e-4b0d-bcc4-9fffee4712b3\") " Nov 22 10:59:30 crc kubenswrapper[4926]: I1122 10:59:30.219178 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-45dld\" (UniqueName: \"kubernetes.io/projected/cf163d93-140e-4b0d-bcc4-9fffee4712b3-kube-api-access-45dld\") pod \"cf163d93-140e-4b0d-bcc4-9fffee4712b3\" (UID: \"cf163d93-140e-4b0d-bcc4-9fffee4712b3\") " Nov 22 10:59:30 crc kubenswrapper[4926]: I1122 10:59:30.220476 4926 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/cf163d93-140e-4b0d-bcc4-9fffee4712b3-log-httpd\") on node \"crc\" DevicePath \"\"" Nov 22 10:59:30 crc kubenswrapper[4926]: I1122 10:59:30.220880 4926 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/cf163d93-140e-4b0d-bcc4-9fffee4712b3-run-httpd\") on node \"crc\" DevicePath \"\"" Nov 22 10:59:30 crc kubenswrapper[4926]: I1122 10:59:30.222772 4926 scope.go:117] "RemoveContainer" containerID="ade90dbaf56b612ec437f180ec7641294939e95d53418b7ff3cf57c2d44af36b" Nov 22 10:59:30 crc kubenswrapper[4926]: I1122 10:59:30.232433 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cf163d93-140e-4b0d-bcc4-9fffee4712b3-kube-api-access-45dld" (OuterVolumeSpecName: "kube-api-access-45dld") pod "cf163d93-140e-4b0d-bcc4-9fffee4712b3" (UID: "cf163d93-140e-4b0d-bcc4-9fffee4712b3"). InnerVolumeSpecName "kube-api-access-45dld". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:59:30 crc kubenswrapper[4926]: I1122 10:59:30.235499 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cf163d93-140e-4b0d-bcc4-9fffee4712b3-scripts" (OuterVolumeSpecName: "scripts") pod "cf163d93-140e-4b0d-bcc4-9fffee4712b3" (UID: "cf163d93-140e-4b0d-bcc4-9fffee4712b3"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:59:30 crc kubenswrapper[4926]: I1122 10:59:30.249285 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cf163d93-140e-4b0d-bcc4-9fffee4712b3-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "cf163d93-140e-4b0d-bcc4-9fffee4712b3" (UID: "cf163d93-140e-4b0d-bcc4-9fffee4712b3"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:59:30 crc kubenswrapper[4926]: I1122 10:59:30.319657 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cf163d93-140e-4b0d-bcc4-9fffee4712b3-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "cf163d93-140e-4b0d-bcc4-9fffee4712b3" (UID: "cf163d93-140e-4b0d-bcc4-9fffee4712b3"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:59:30 crc kubenswrapper[4926]: I1122 10:59:30.323227 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-45dld\" (UniqueName: \"kubernetes.io/projected/cf163d93-140e-4b0d-bcc4-9fffee4712b3-kube-api-access-45dld\") on node \"crc\" DevicePath \"\"" Nov 22 10:59:30 crc kubenswrapper[4926]: I1122 10:59:30.323275 4926 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cf163d93-140e-4b0d-bcc4-9fffee4712b3-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 22 10:59:30 crc kubenswrapper[4926]: I1122 10:59:30.323290 4926 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/cf163d93-140e-4b0d-bcc4-9fffee4712b3-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Nov 22 10:59:30 crc kubenswrapper[4926]: I1122 10:59:30.323305 4926 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/cf163d93-140e-4b0d-bcc4-9fffee4712b3-scripts\") on node \"crc\" DevicePath \"\"" Nov 22 10:59:30 crc kubenswrapper[4926]: I1122 10:59:30.334513 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cf163d93-140e-4b0d-bcc4-9fffee4712b3-config-data" (OuterVolumeSpecName: "config-data") pod "cf163d93-140e-4b0d-bcc4-9fffee4712b3" (UID: "cf163d93-140e-4b0d-bcc4-9fffee4712b3"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:59:30 crc kubenswrapper[4926]: I1122 10:59:30.369256 4926 scope.go:117] "RemoveContainer" containerID="714c95772f9ce0959495da8e9f02baf2d62cbb84fc3b9650167aa4c2bbdaafd9" Nov 22 10:59:30 crc kubenswrapper[4926]: E1122 10:59:30.369762 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"714c95772f9ce0959495da8e9f02baf2d62cbb84fc3b9650167aa4c2bbdaafd9\": container with ID starting with 714c95772f9ce0959495da8e9f02baf2d62cbb84fc3b9650167aa4c2bbdaafd9 not found: ID does not exist" containerID="714c95772f9ce0959495da8e9f02baf2d62cbb84fc3b9650167aa4c2bbdaafd9" Nov 22 10:59:30 crc kubenswrapper[4926]: I1122 10:59:30.369817 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"714c95772f9ce0959495da8e9f02baf2d62cbb84fc3b9650167aa4c2bbdaafd9"} err="failed to get container status \"714c95772f9ce0959495da8e9f02baf2d62cbb84fc3b9650167aa4c2bbdaafd9\": rpc error: code = NotFound desc = could not find container \"714c95772f9ce0959495da8e9f02baf2d62cbb84fc3b9650167aa4c2bbdaafd9\": container with ID starting with 714c95772f9ce0959495da8e9f02baf2d62cbb84fc3b9650167aa4c2bbdaafd9 not found: ID does not exist" Nov 22 10:59:30 crc kubenswrapper[4926]: I1122 10:59:30.369855 4926 scope.go:117] "RemoveContainer" containerID="04355b4470a06df0bcb09ee91fd635d51b5c37bfd3192b5c8eb2d7cc87fe1fc2" Nov 22 10:59:30 crc kubenswrapper[4926]: E1122 10:59:30.370303 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"04355b4470a06df0bcb09ee91fd635d51b5c37bfd3192b5c8eb2d7cc87fe1fc2\": container with ID starting with 04355b4470a06df0bcb09ee91fd635d51b5c37bfd3192b5c8eb2d7cc87fe1fc2 not found: ID does not exist" containerID="04355b4470a06df0bcb09ee91fd635d51b5c37bfd3192b5c8eb2d7cc87fe1fc2" Nov 22 10:59:30 crc kubenswrapper[4926]: I1122 10:59:30.370337 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"04355b4470a06df0bcb09ee91fd635d51b5c37bfd3192b5c8eb2d7cc87fe1fc2"} err="failed to get container status \"04355b4470a06df0bcb09ee91fd635d51b5c37bfd3192b5c8eb2d7cc87fe1fc2\": rpc error: code = NotFound desc = could not find container \"04355b4470a06df0bcb09ee91fd635d51b5c37bfd3192b5c8eb2d7cc87fe1fc2\": container with ID starting with 04355b4470a06df0bcb09ee91fd635d51b5c37bfd3192b5c8eb2d7cc87fe1fc2 not found: ID does not exist" Nov 22 10:59:30 crc kubenswrapper[4926]: I1122 10:59:30.370357 4926 scope.go:117] "RemoveContainer" containerID="61802681c2f9ee25f8c4034e975d4cd836b40dec1c3983d1c537547a11e9d502" Nov 22 10:59:30 crc kubenswrapper[4926]: E1122 10:59:30.370649 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"61802681c2f9ee25f8c4034e975d4cd836b40dec1c3983d1c537547a11e9d502\": container with ID starting with 61802681c2f9ee25f8c4034e975d4cd836b40dec1c3983d1c537547a11e9d502 not found: ID does not exist" containerID="61802681c2f9ee25f8c4034e975d4cd836b40dec1c3983d1c537547a11e9d502" Nov 22 10:59:30 crc kubenswrapper[4926]: I1122 10:59:30.370677 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"61802681c2f9ee25f8c4034e975d4cd836b40dec1c3983d1c537547a11e9d502"} err="failed to get container status \"61802681c2f9ee25f8c4034e975d4cd836b40dec1c3983d1c537547a11e9d502\": rpc error: code = NotFound desc = could not 
find container \"61802681c2f9ee25f8c4034e975d4cd836b40dec1c3983d1c537547a11e9d502\": container with ID starting with 61802681c2f9ee25f8c4034e975d4cd836b40dec1c3983d1c537547a11e9d502 not found: ID does not exist" Nov 22 10:59:30 crc kubenswrapper[4926]: I1122 10:59:30.370699 4926 scope.go:117] "RemoveContainer" containerID="ade90dbaf56b612ec437f180ec7641294939e95d53418b7ff3cf57c2d44af36b" Nov 22 10:59:30 crc kubenswrapper[4926]: E1122 10:59:30.370991 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ade90dbaf56b612ec437f180ec7641294939e95d53418b7ff3cf57c2d44af36b\": container with ID starting with ade90dbaf56b612ec437f180ec7641294939e95d53418b7ff3cf57c2d44af36b not found: ID does not exist" containerID="ade90dbaf56b612ec437f180ec7641294939e95d53418b7ff3cf57c2d44af36b" Nov 22 10:59:30 crc kubenswrapper[4926]: I1122 10:59:30.371024 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ade90dbaf56b612ec437f180ec7641294939e95d53418b7ff3cf57c2d44af36b"} err="failed to get container status \"ade90dbaf56b612ec437f180ec7641294939e95d53418b7ff3cf57c2d44af36b\": rpc error: code = NotFound desc = could not find container \"ade90dbaf56b612ec437f180ec7641294939e95d53418b7ff3cf57c2d44af36b\": container with ID starting with ade90dbaf56b612ec437f180ec7641294939e95d53418b7ff3cf57c2d44af36b not found: ID does not exist" Nov 22 10:59:30 crc kubenswrapper[4926]: I1122 10:59:30.386116 4926 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="69ff2b4a-0e22-4cb4-9f77-71753997ff1e" containerName="nova-metadata-log" probeResult="failure" output="Get \"https://10.217.0.195:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Nov 22 10:59:30 crc kubenswrapper[4926]: I1122 10:59:30.386124 4926 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="69ff2b4a-0e22-4cb4-9f77-71753997ff1e" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"https://10.217.0.195:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Nov 22 10:59:30 crc kubenswrapper[4926]: I1122 10:59:30.426133 4926 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cf163d93-140e-4b0d-bcc4-9fffee4712b3-config-data\") on node \"crc\" DevicePath \"\"" Nov 22 10:59:30 crc kubenswrapper[4926]: I1122 10:59:30.459535 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 22 10:59:30 crc kubenswrapper[4926]: I1122 10:59:30.470353 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Nov 22 10:59:30 crc kubenswrapper[4926]: I1122 10:59:30.491212 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 22 10:59:30 crc kubenswrapper[4926]: E1122 10:59:30.491716 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cf163d93-140e-4b0d-bcc4-9fffee4712b3" containerName="sg-core" Nov 22 10:59:30 crc kubenswrapper[4926]: I1122 10:59:30.491740 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="cf163d93-140e-4b0d-bcc4-9fffee4712b3" containerName="sg-core" Nov 22 10:59:30 crc kubenswrapper[4926]: E1122 10:59:30.491759 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cf163d93-140e-4b0d-bcc4-9fffee4712b3" containerName="ceilometer-notification-agent" Nov 22 10:59:30 crc kubenswrapper[4926]: I1122 
Nov 22 10:59:30 crc kubenswrapper[4926]: I1122 10:59:30.491770 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="cf163d93-140e-4b0d-bcc4-9fffee4712b3" containerName="ceilometer-notification-agent"
Nov 22 10:59:30 crc kubenswrapper[4926]: E1122 10:59:30.491791 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cf163d93-140e-4b0d-bcc4-9fffee4712b3" containerName="proxy-httpd"
Nov 22 10:59:30 crc kubenswrapper[4926]: I1122 10:59:30.491799 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="cf163d93-140e-4b0d-bcc4-9fffee4712b3" containerName="proxy-httpd"
Nov 22 10:59:30 crc kubenswrapper[4926]: E1122 10:59:30.491810 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cf163d93-140e-4b0d-bcc4-9fffee4712b3" containerName="ceilometer-central-agent"
Nov 22 10:59:30 crc kubenswrapper[4926]: I1122 10:59:30.491817 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="cf163d93-140e-4b0d-bcc4-9fffee4712b3" containerName="ceilometer-central-agent"
Nov 22 10:59:30 crc kubenswrapper[4926]: I1122 10:59:30.492110 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="cf163d93-140e-4b0d-bcc4-9fffee4712b3" containerName="ceilometer-notification-agent"
Nov 22 10:59:30 crc kubenswrapper[4926]: I1122 10:59:30.492128 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="cf163d93-140e-4b0d-bcc4-9fffee4712b3" containerName="sg-core"
Nov 22 10:59:30 crc kubenswrapper[4926]: I1122 10:59:30.492138 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="cf163d93-140e-4b0d-bcc4-9fffee4712b3" containerName="ceilometer-central-agent"
Nov 22 10:59:30 crc kubenswrapper[4926]: I1122 10:59:30.492146 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="cf163d93-140e-4b0d-bcc4-9fffee4712b3" containerName="proxy-httpd"
Need to start a new one" pod="openstack/ceilometer-0" Nov 22 10:59:30 crc kubenswrapper[4926]: I1122 10:59:30.496414 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 22 10:59:30 crc kubenswrapper[4926]: I1122 10:59:30.496514 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 22 10:59:30 crc kubenswrapper[4926]: I1122 10:59:30.498728 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ceilometer-internal-svc" Nov 22 10:59:30 crc kubenswrapper[4926]: I1122 10:59:30.500940 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 22 10:59:30 crc kubenswrapper[4926]: I1122 10:59:30.592506 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cf163d93-140e-4b0d-bcc4-9fffee4712b3" path="/var/lib/kubelet/pods/cf163d93-140e-4b0d-bcc4-9fffee4712b3/volumes" Nov 22 10:59:30 crc kubenswrapper[4926]: I1122 10:59:30.634375 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/131956d4-b7eb-472a-bfb0-e714ee99aa5a-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"131956d4-b7eb-472a-bfb0-e714ee99aa5a\") " pod="openstack/ceilometer-0" Nov 22 10:59:30 crc kubenswrapper[4926]: I1122 10:59:30.634437 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/131956d4-b7eb-472a-bfb0-e714ee99aa5a-config-data\") pod \"ceilometer-0\" (UID: \"131956d4-b7eb-472a-bfb0-e714ee99aa5a\") " pod="openstack/ceilometer-0" Nov 22 10:59:30 crc kubenswrapper[4926]: I1122 10:59:30.634573 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/131956d4-b7eb-472a-bfb0-e714ee99aa5a-log-httpd\") pod \"ceilometer-0\" (UID: \"131956d4-b7eb-472a-bfb0-e714ee99aa5a\") " pod="openstack/ceilometer-0" Nov 22 10:59:30 crc kubenswrapper[4926]: I1122 10:59:30.634662 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/131956d4-b7eb-472a-bfb0-e714ee99aa5a-scripts\") pod \"ceilometer-0\" (UID: \"131956d4-b7eb-472a-bfb0-e714ee99aa5a\") " pod="openstack/ceilometer-0" Nov 22 10:59:30 crc kubenswrapper[4926]: I1122 10:59:30.634734 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/131956d4-b7eb-472a-bfb0-e714ee99aa5a-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"131956d4-b7eb-472a-bfb0-e714ee99aa5a\") " pod="openstack/ceilometer-0" Nov 22 10:59:30 crc kubenswrapper[4926]: I1122 10:59:30.634770 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-l2hq2\" (UniqueName: \"kubernetes.io/projected/131956d4-b7eb-472a-bfb0-e714ee99aa5a-kube-api-access-l2hq2\") pod \"ceilometer-0\" (UID: \"131956d4-b7eb-472a-bfb0-e714ee99aa5a\") " pod="openstack/ceilometer-0" Nov 22 10:59:30 crc kubenswrapper[4926]: I1122 10:59:30.635006 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/131956d4-b7eb-472a-bfb0-e714ee99aa5a-run-httpd\") pod \"ceilometer-0\" (UID: \"131956d4-b7eb-472a-bfb0-e714ee99aa5a\") " 
pod="openstack/ceilometer-0" Nov 22 10:59:30 crc kubenswrapper[4926]: I1122 10:59:30.635096 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/131956d4-b7eb-472a-bfb0-e714ee99aa5a-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"131956d4-b7eb-472a-bfb0-e714ee99aa5a\") " pod="openstack/ceilometer-0" Nov 22 10:59:30 crc kubenswrapper[4926]: I1122 10:59:30.736995 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/131956d4-b7eb-472a-bfb0-e714ee99aa5a-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"131956d4-b7eb-472a-bfb0-e714ee99aa5a\") " pod="openstack/ceilometer-0" Nov 22 10:59:30 crc kubenswrapper[4926]: I1122 10:59:30.737448 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/131956d4-b7eb-472a-bfb0-e714ee99aa5a-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"131956d4-b7eb-472a-bfb0-e714ee99aa5a\") " pod="openstack/ceilometer-0" Nov 22 10:59:30 crc kubenswrapper[4926]: I1122 10:59:30.737566 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/131956d4-b7eb-472a-bfb0-e714ee99aa5a-config-data\") pod \"ceilometer-0\" (UID: \"131956d4-b7eb-472a-bfb0-e714ee99aa5a\") " pod="openstack/ceilometer-0" Nov 22 10:59:30 crc kubenswrapper[4926]: I1122 10:59:30.737703 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/131956d4-b7eb-472a-bfb0-e714ee99aa5a-log-httpd\") pod \"ceilometer-0\" (UID: \"131956d4-b7eb-472a-bfb0-e714ee99aa5a\") " pod="openstack/ceilometer-0" Nov 22 10:59:30 crc kubenswrapper[4926]: I1122 10:59:30.737830 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/131956d4-b7eb-472a-bfb0-e714ee99aa5a-scripts\") pod \"ceilometer-0\" (UID: \"131956d4-b7eb-472a-bfb0-e714ee99aa5a\") " pod="openstack/ceilometer-0" Nov 22 10:59:30 crc kubenswrapper[4926]: I1122 10:59:30.737979 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/131956d4-b7eb-472a-bfb0-e714ee99aa5a-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"131956d4-b7eb-472a-bfb0-e714ee99aa5a\") " pod="openstack/ceilometer-0" Nov 22 10:59:30 crc kubenswrapper[4926]: I1122 10:59:30.738089 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-l2hq2\" (UniqueName: \"kubernetes.io/projected/131956d4-b7eb-472a-bfb0-e714ee99aa5a-kube-api-access-l2hq2\") pod \"ceilometer-0\" (UID: \"131956d4-b7eb-472a-bfb0-e714ee99aa5a\") " pod="openstack/ceilometer-0" Nov 22 10:59:30 crc kubenswrapper[4926]: I1122 10:59:30.738256 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/131956d4-b7eb-472a-bfb0-e714ee99aa5a-log-httpd\") pod \"ceilometer-0\" (UID: \"131956d4-b7eb-472a-bfb0-e714ee99aa5a\") " pod="openstack/ceilometer-0" Nov 22 10:59:30 crc kubenswrapper[4926]: I1122 10:59:30.738372 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/131956d4-b7eb-472a-bfb0-e714ee99aa5a-run-httpd\") pod \"ceilometer-0\" (UID: 
\"131956d4-b7eb-472a-bfb0-e714ee99aa5a\") " pod="openstack/ceilometer-0" Nov 22 10:59:30 crc kubenswrapper[4926]: I1122 10:59:30.738829 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/131956d4-b7eb-472a-bfb0-e714ee99aa5a-run-httpd\") pod \"ceilometer-0\" (UID: \"131956d4-b7eb-472a-bfb0-e714ee99aa5a\") " pod="openstack/ceilometer-0" Nov 22 10:59:30 crc kubenswrapper[4926]: I1122 10:59:30.742590 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/131956d4-b7eb-472a-bfb0-e714ee99aa5a-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"131956d4-b7eb-472a-bfb0-e714ee99aa5a\") " pod="openstack/ceilometer-0" Nov 22 10:59:30 crc kubenswrapper[4926]: I1122 10:59:30.742676 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/131956d4-b7eb-472a-bfb0-e714ee99aa5a-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"131956d4-b7eb-472a-bfb0-e714ee99aa5a\") " pod="openstack/ceilometer-0" Nov 22 10:59:30 crc kubenswrapper[4926]: I1122 10:59:30.744185 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/131956d4-b7eb-472a-bfb0-e714ee99aa5a-scripts\") pod \"ceilometer-0\" (UID: \"131956d4-b7eb-472a-bfb0-e714ee99aa5a\") " pod="openstack/ceilometer-0" Nov 22 10:59:30 crc kubenswrapper[4926]: I1122 10:59:30.744601 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/131956d4-b7eb-472a-bfb0-e714ee99aa5a-config-data\") pod \"ceilometer-0\" (UID: \"131956d4-b7eb-472a-bfb0-e714ee99aa5a\") " pod="openstack/ceilometer-0" Nov 22 10:59:30 crc kubenswrapper[4926]: I1122 10:59:30.752646 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/131956d4-b7eb-472a-bfb0-e714ee99aa5a-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"131956d4-b7eb-472a-bfb0-e714ee99aa5a\") " pod="openstack/ceilometer-0" Nov 22 10:59:30 crc kubenswrapper[4926]: I1122 10:59:30.757317 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-l2hq2\" (UniqueName: \"kubernetes.io/projected/131956d4-b7eb-472a-bfb0-e714ee99aa5a-kube-api-access-l2hq2\") pod \"ceilometer-0\" (UID: \"131956d4-b7eb-472a-bfb0-e714ee99aa5a\") " pod="openstack/ceilometer-0" Nov 22 10:59:30 crc kubenswrapper[4926]: I1122 10:59:30.814380 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 22 10:59:31 crc kubenswrapper[4926]: I1122 10:59:31.368430 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 22 10:59:32 crc kubenswrapper[4926]: I1122 10:59:32.153034 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"131956d4-b7eb-472a-bfb0-e714ee99aa5a","Type":"ContainerStarted","Data":"99424beed41139b6af4b6dcb5729c4a399a52f7c4d29aa3be3963c2a4d248b48"} Nov 22 10:59:33 crc kubenswrapper[4926]: I1122 10:59:33.168644 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"131956d4-b7eb-472a-bfb0-e714ee99aa5a","Type":"ContainerStarted","Data":"cc2df8fcb3c51fc96cc6c6afb171fa6019bfa336e8ed0d2ca02b3af384e283ad"} Nov 22 10:59:33 crc kubenswrapper[4926]: I1122 10:59:33.168934 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"131956d4-b7eb-472a-bfb0-e714ee99aa5a","Type":"ContainerStarted","Data":"27086137beba520f2fb742ba30c166dd964e65f9a8cd87f3f971ab212e37e9ca"} Nov 22 10:59:33 crc kubenswrapper[4926]: I1122 10:59:33.812687 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0" Nov 22 10:59:33 crc kubenswrapper[4926]: I1122 10:59:33.820774 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Nov 22 10:59:33 crc kubenswrapper[4926]: I1122 10:59:33.820861 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Nov 22 10:59:33 crc kubenswrapper[4926]: I1122 10:59:33.846064 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-scheduler-0" Nov 22 10:59:34 crc kubenswrapper[4926]: I1122 10:59:34.180193 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"131956d4-b7eb-472a-bfb0-e714ee99aa5a","Type":"ContainerStarted","Data":"c4bf729be5df818cc28bfb76ef6b4e94821627f682576602e63f10094eafac49"} Nov 22 10:59:34 crc kubenswrapper[4926]: I1122 10:59:34.214792 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-scheduler-0" Nov 22 10:59:34 crc kubenswrapper[4926]: I1122 10:59:34.903097 4926 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="c44886b1-1d10-4c9a-b13e-9e53d75e978b" containerName="nova-api-api" probeResult="failure" output="Get \"http://10.217.0.197:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 22 10:59:34 crc kubenswrapper[4926]: I1122 10:59:34.903124 4926 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="c44886b1-1d10-4c9a-b13e-9e53d75e978b" containerName="nova-api-log" probeResult="failure" output="Get \"http://10.217.0.197:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 22 10:59:35 crc kubenswrapper[4926]: I1122 10:59:35.192759 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"131956d4-b7eb-472a-bfb0-e714ee99aa5a","Type":"ContainerStarted","Data":"faa6b14e99d65e34d5e924cc130597e1e2fdadfe8c78f1e599415ff698a18d99"} Nov 22 10:59:35 crc kubenswrapper[4926]: I1122 10:59:35.192811 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Nov 22 10:59:35 crc kubenswrapper[4926]: I1122 10:59:35.219969 4926 pod_startup_latency_tracker.go:104] "Observed pod startup 
duration" pod="openstack/ceilometer-0" podStartSLOduration=1.977022209 podStartE2EDuration="5.219950116s" podCreationTimestamp="2025-11-22 10:59:30 +0000 UTC" firstStartedPulling="2025-11-22 10:59:31.361594966 +0000 UTC m=+1191.663200253" lastFinishedPulling="2025-11-22 10:59:34.604522873 +0000 UTC m=+1194.906128160" observedRunningTime="2025-11-22 10:59:35.214621023 +0000 UTC m=+1195.516226310" watchObservedRunningTime="2025-11-22 10:59:35.219950116 +0000 UTC m=+1195.521555403" Nov 22 10:59:35 crc kubenswrapper[4926]: I1122 10:59:35.463692 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/kube-state-metrics-0" Nov 22 10:59:39 crc kubenswrapper[4926]: I1122 10:59:39.347130 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Nov 22 10:59:39 crc kubenswrapper[4926]: I1122 10:59:39.347728 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Nov 22 10:59:39 crc kubenswrapper[4926]: I1122 10:59:39.354576 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0" Nov 22 10:59:39 crc kubenswrapper[4926]: I1122 10:59:39.356577 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0" Nov 22 10:59:39 crc kubenswrapper[4926]: I1122 10:59:39.661294 4926 patch_prober.go:28] interesting pod/machine-config-daemon-xr9nd container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 22 10:59:39 crc kubenswrapper[4926]: I1122 10:59:39.661365 4926 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" podUID="d4977b14-85c3-4141-9b15-1768f09e8d27" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 22 10:59:39 crc kubenswrapper[4926]: I1122 10:59:39.661418 4926 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" Nov 22 10:59:39 crc kubenswrapper[4926]: I1122 10:59:39.661962 4926 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"15cec4426d1f6906f001420dd32a2e3b60079ed2bd3dc4ce7916ceddb9716375"} pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 22 10:59:39 crc kubenswrapper[4926]: I1122 10:59:39.662019 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" podUID="d4977b14-85c3-4141-9b15-1768f09e8d27" containerName="machine-config-daemon" containerID="cri-o://15cec4426d1f6906f001420dd32a2e3b60079ed2bd3dc4ce7916ceddb9716375" gracePeriod=600 Nov 22 10:59:40 crc kubenswrapper[4926]: I1122 10:59:40.239157 4926 generic.go:334] "Generic (PLEG): container finished" podID="d4977b14-85c3-4141-9b15-1768f09e8d27" containerID="15cec4426d1f6906f001420dd32a2e3b60079ed2bd3dc4ce7916ceddb9716375" exitCode=0 Nov 22 10:59:40 crc kubenswrapper[4926]: I1122 10:59:40.239235 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" event={"ID":"d4977b14-85c3-4141-9b15-1768f09e8d27","Type":"ContainerDied","Data":"15cec4426d1f6906f001420dd32a2e3b60079ed2bd3dc4ce7916ceddb9716375"} Nov 22 10:59:40 crc kubenswrapper[4926]: I1122 10:59:40.239490 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" event={"ID":"d4977b14-85c3-4141-9b15-1768f09e8d27","Type":"ContainerStarted","Data":"48831bc9d5675e008344df0cb1df4d3942248ba162e33c17e856f93d5faa1d3d"} Nov 22 10:59:40 crc kubenswrapper[4926]: I1122 10:59:40.239511 4926 scope.go:117] "RemoveContainer" containerID="324c8a547404e72a94f3704898c15718c3b4d8e320319c01811edb40ae550f2e" Nov 22 10:59:42 crc kubenswrapper[4926]: I1122 10:59:42.177506 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Nov 22 10:59:42 crc kubenswrapper[4926]: I1122 10:59:42.256717 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1ad9946b-4e9e-4366-9a71-da0887c1b083-config-data\") pod \"1ad9946b-4e9e-4366-9a71-da0887c1b083\" (UID: \"1ad9946b-4e9e-4366-9a71-da0887c1b083\") " Nov 22 10:59:42 crc kubenswrapper[4926]: I1122 10:59:42.256863 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1ad9946b-4e9e-4366-9a71-da0887c1b083-combined-ca-bundle\") pod \"1ad9946b-4e9e-4366-9a71-da0887c1b083\" (UID: \"1ad9946b-4e9e-4366-9a71-da0887c1b083\") " Nov 22 10:59:42 crc kubenswrapper[4926]: I1122 10:59:42.256910 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2tmgs\" (UniqueName: \"kubernetes.io/projected/1ad9946b-4e9e-4366-9a71-da0887c1b083-kube-api-access-2tmgs\") pod \"1ad9946b-4e9e-4366-9a71-da0887c1b083\" (UID: \"1ad9946b-4e9e-4366-9a71-da0887c1b083\") " Nov 22 10:59:42 crc kubenswrapper[4926]: I1122 10:59:42.266224 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1ad9946b-4e9e-4366-9a71-da0887c1b083-kube-api-access-2tmgs" (OuterVolumeSpecName: "kube-api-access-2tmgs") pod "1ad9946b-4e9e-4366-9a71-da0887c1b083" (UID: "1ad9946b-4e9e-4366-9a71-da0887c1b083"). InnerVolumeSpecName "kube-api-access-2tmgs". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:59:42 crc kubenswrapper[4926]: I1122 10:59:42.268551 4926 generic.go:334] "Generic (PLEG): container finished" podID="1ad9946b-4e9e-4366-9a71-da0887c1b083" containerID="5bb9326dba542bc13789f7b389ad8e161d6e35a5cc10feedaf8773ecf5063aee" exitCode=137 Nov 22 10:59:42 crc kubenswrapper[4926]: I1122 10:59:42.268601 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"1ad9946b-4e9e-4366-9a71-da0887c1b083","Type":"ContainerDied","Data":"5bb9326dba542bc13789f7b389ad8e161d6e35a5cc10feedaf8773ecf5063aee"} Nov 22 10:59:42 crc kubenswrapper[4926]: I1122 10:59:42.268635 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"1ad9946b-4e9e-4366-9a71-da0887c1b083","Type":"ContainerDied","Data":"b2c41d56a0232d6d8c14854cefdbb063e64ae73b234d16ee7db4202d2741bb74"} Nov 22 10:59:42 crc kubenswrapper[4926]: I1122 10:59:42.268655 4926 scope.go:117] "RemoveContainer" containerID="5bb9326dba542bc13789f7b389ad8e161d6e35a5cc10feedaf8773ecf5063aee" Nov 22 10:59:42 crc kubenswrapper[4926]: I1122 10:59:42.268791 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Nov 22 10:59:42 crc kubenswrapper[4926]: I1122 10:59:42.301335 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1ad9946b-4e9e-4366-9a71-da0887c1b083-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "1ad9946b-4e9e-4366-9a71-da0887c1b083" (UID: "1ad9946b-4e9e-4366-9a71-da0887c1b083"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:59:42 crc kubenswrapper[4926]: I1122 10:59:42.302399 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1ad9946b-4e9e-4366-9a71-da0887c1b083-config-data" (OuterVolumeSpecName: "config-data") pod "1ad9946b-4e9e-4366-9a71-da0887c1b083" (UID: "1ad9946b-4e9e-4366-9a71-da0887c1b083"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:59:42 crc kubenswrapper[4926]: I1122 10:59:42.359408 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2tmgs\" (UniqueName: \"kubernetes.io/projected/1ad9946b-4e9e-4366-9a71-da0887c1b083-kube-api-access-2tmgs\") on node \"crc\" DevicePath \"\"" Nov 22 10:59:42 crc kubenswrapper[4926]: I1122 10:59:42.359448 4926 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1ad9946b-4e9e-4366-9a71-da0887c1b083-config-data\") on node \"crc\" DevicePath \"\"" Nov 22 10:59:42 crc kubenswrapper[4926]: I1122 10:59:42.359463 4926 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1ad9946b-4e9e-4366-9a71-da0887c1b083-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 22 10:59:42 crc kubenswrapper[4926]: I1122 10:59:42.382285 4926 scope.go:117] "RemoveContainer" containerID="5bb9326dba542bc13789f7b389ad8e161d6e35a5cc10feedaf8773ecf5063aee" Nov 22 10:59:42 crc kubenswrapper[4926]: E1122 10:59:42.383490 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5bb9326dba542bc13789f7b389ad8e161d6e35a5cc10feedaf8773ecf5063aee\": container with ID starting with 5bb9326dba542bc13789f7b389ad8e161d6e35a5cc10feedaf8773ecf5063aee not found: ID does not exist" containerID="5bb9326dba542bc13789f7b389ad8e161d6e35a5cc10feedaf8773ecf5063aee" Nov 22 10:59:42 crc kubenswrapper[4926]: I1122 10:59:42.383646 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5bb9326dba542bc13789f7b389ad8e161d6e35a5cc10feedaf8773ecf5063aee"} err="failed to get container status \"5bb9326dba542bc13789f7b389ad8e161d6e35a5cc10feedaf8773ecf5063aee\": rpc error: code = NotFound desc = could not find container \"5bb9326dba542bc13789f7b389ad8e161d6e35a5cc10feedaf8773ecf5063aee\": container with ID starting with 5bb9326dba542bc13789f7b389ad8e161d6e35a5cc10feedaf8773ecf5063aee not found: ID does not exist" Nov 22 10:59:42 crc kubenswrapper[4926]: I1122 10:59:42.612740 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 22 10:59:42 crc kubenswrapper[4926]: I1122 10:59:42.622755 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 22 10:59:42 crc kubenswrapper[4926]: I1122 10:59:42.632786 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 22 10:59:42 crc kubenswrapper[4926]: E1122 10:59:42.633197 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1ad9946b-4e9e-4366-9a71-da0887c1b083" containerName="nova-cell1-novncproxy-novncproxy" Nov 22 10:59:42 crc kubenswrapper[4926]: I1122 10:59:42.633217 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="1ad9946b-4e9e-4366-9a71-da0887c1b083" containerName="nova-cell1-novncproxy-novncproxy" Nov 22 10:59:42 crc kubenswrapper[4926]: I1122 10:59:42.633429 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="1ad9946b-4e9e-4366-9a71-da0887c1b083" containerName="nova-cell1-novncproxy-novncproxy" Nov 22 10:59:42 crc kubenswrapper[4926]: I1122 10:59:42.634116 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Nov 22 10:59:42 crc kubenswrapper[4926]: I1122 10:59:42.637153 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-novncproxy-cell1-vencrypt" Nov 22 10:59:42 crc kubenswrapper[4926]: I1122 10:59:42.638331 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-novncproxy-cell1-public-svc" Nov 22 10:59:42 crc kubenswrapper[4926]: I1122 10:59:42.638532 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-novncproxy-config-data" Nov 22 10:59:42 crc kubenswrapper[4926]: I1122 10:59:42.642716 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 22 10:59:42 crc kubenswrapper[4926]: I1122 10:59:42.768972 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/ccc7d0f8-4767-44f3-8dfa-d83b48fc23a2-nova-novncproxy-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"ccc7d0f8-4767-44f3-8dfa-d83b48fc23a2\") " pod="openstack/nova-cell1-novncproxy-0" Nov 22 10:59:42 crc kubenswrapper[4926]: I1122 10:59:42.769089 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/ccc7d0f8-4767-44f3-8dfa-d83b48fc23a2-vencrypt-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"ccc7d0f8-4767-44f3-8dfa-d83b48fc23a2\") " pod="openstack/nova-cell1-novncproxy-0" Nov 22 10:59:42 crc kubenswrapper[4926]: I1122 10:59:42.769154 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ccc7d0f8-4767-44f3-8dfa-d83b48fc23a2-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"ccc7d0f8-4767-44f3-8dfa-d83b48fc23a2\") " pod="openstack/nova-cell1-novncproxy-0" Nov 22 10:59:42 crc kubenswrapper[4926]: I1122 10:59:42.769217 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ccc7d0f8-4767-44f3-8dfa-d83b48fc23a2-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"ccc7d0f8-4767-44f3-8dfa-d83b48fc23a2\") " pod="openstack/nova-cell1-novncproxy-0" Nov 22 10:59:42 crc kubenswrapper[4926]: I1122 10:59:42.769245 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rtrp8\" (UniqueName: \"kubernetes.io/projected/ccc7d0f8-4767-44f3-8dfa-d83b48fc23a2-kube-api-access-rtrp8\") pod \"nova-cell1-novncproxy-0\" (UID: \"ccc7d0f8-4767-44f3-8dfa-d83b48fc23a2\") " pod="openstack/nova-cell1-novncproxy-0" Nov 22 10:59:42 crc kubenswrapper[4926]: I1122 10:59:42.870575 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ccc7d0f8-4767-44f3-8dfa-d83b48fc23a2-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"ccc7d0f8-4767-44f3-8dfa-d83b48fc23a2\") " pod="openstack/nova-cell1-novncproxy-0" Nov 22 10:59:42 crc kubenswrapper[4926]: I1122 10:59:42.870704 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ccc7d0f8-4767-44f3-8dfa-d83b48fc23a2-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"ccc7d0f8-4767-44f3-8dfa-d83b48fc23a2\") " pod="openstack/nova-cell1-novncproxy-0" Nov 22 
Nov 22 10:59:42 crc kubenswrapper[4926]: I1122 10:59:42.870746 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rtrp8\" (UniqueName: \"kubernetes.io/projected/ccc7d0f8-4767-44f3-8dfa-d83b48fc23a2-kube-api-access-rtrp8\") pod \"nova-cell1-novncproxy-0\" (UID: \"ccc7d0f8-4767-44f3-8dfa-d83b48fc23a2\") " pod="openstack/nova-cell1-novncproxy-0"
Nov 22 10:59:42 crc kubenswrapper[4926]: I1122 10:59:42.870846 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/ccc7d0f8-4767-44f3-8dfa-d83b48fc23a2-nova-novncproxy-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"ccc7d0f8-4767-44f3-8dfa-d83b48fc23a2\") " pod="openstack/nova-cell1-novncproxy-0"
Nov 22 10:59:42 crc kubenswrapper[4926]: I1122 10:59:42.870973 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/ccc7d0f8-4767-44f3-8dfa-d83b48fc23a2-vencrypt-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"ccc7d0f8-4767-44f3-8dfa-d83b48fc23a2\") " pod="openstack/nova-cell1-novncproxy-0"
Nov 22 10:59:42 crc kubenswrapper[4926]: I1122 10:59:42.878607 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ccc7d0f8-4767-44f3-8dfa-d83b48fc23a2-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"ccc7d0f8-4767-44f3-8dfa-d83b48fc23a2\") " pod="openstack/nova-cell1-novncproxy-0"
Nov 22 10:59:42 crc kubenswrapper[4926]: I1122 10:59:42.878740 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ccc7d0f8-4767-44f3-8dfa-d83b48fc23a2-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"ccc7d0f8-4767-44f3-8dfa-d83b48fc23a2\") " pod="openstack/nova-cell1-novncproxy-0"
Nov 22 10:59:42 crc kubenswrapper[4926]: I1122 10:59:42.878820 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/ccc7d0f8-4767-44f3-8dfa-d83b48fc23a2-nova-novncproxy-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"ccc7d0f8-4767-44f3-8dfa-d83b48fc23a2\") " pod="openstack/nova-cell1-novncproxy-0"
Nov 22 10:59:42 crc kubenswrapper[4926]: I1122 10:59:42.879500 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/ccc7d0f8-4767-44f3-8dfa-d83b48fc23a2-vencrypt-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"ccc7d0f8-4767-44f3-8dfa-d83b48fc23a2\") " pod="openstack/nova-cell1-novncproxy-0"
Nov 22 10:59:42 crc kubenswrapper[4926]: I1122 10:59:42.902554 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rtrp8\" (UniqueName: \"kubernetes.io/projected/ccc7d0f8-4767-44f3-8dfa-d83b48fc23a2-kube-api-access-rtrp8\") pod \"nova-cell1-novncproxy-0\" (UID: \"ccc7d0f8-4767-44f3-8dfa-d83b48fc23a2\") " pod="openstack/nova-cell1-novncproxy-0"
Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Nov 22 10:59:43 crc kubenswrapper[4926]: I1122 10:59:43.404518 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 22 10:59:43 crc kubenswrapper[4926]: I1122 10:59:43.824989 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Nov 22 10:59:43 crc kubenswrapper[4926]: I1122 10:59:43.825601 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Nov 22 10:59:43 crc kubenswrapper[4926]: I1122 10:59:43.827178 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Nov 22 10:59:43 crc kubenswrapper[4926]: I1122 10:59:43.841340 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Nov 22 10:59:44 crc kubenswrapper[4926]: I1122 10:59:44.289056 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"ccc7d0f8-4767-44f3-8dfa-d83b48fc23a2","Type":"ContainerStarted","Data":"8677041c2f0635b2269df052569008fb3be2777bbfad47ddc84c70edaf292249"} Nov 22 10:59:44 crc kubenswrapper[4926]: I1122 10:59:44.289322 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Nov 22 10:59:44 crc kubenswrapper[4926]: I1122 10:59:44.289340 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"ccc7d0f8-4767-44f3-8dfa-d83b48fc23a2","Type":"ContainerStarted","Data":"62c5a684b3aefbc403c14c182e614add106bbb421536b6ffa50faae4d270e5d0"} Nov 22 10:59:44 crc kubenswrapper[4926]: I1122 10:59:44.298056 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Nov 22 10:59:44 crc kubenswrapper[4926]: I1122 10:59:44.312576 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-novncproxy-0" podStartSLOduration=2.3125565200000002 podStartE2EDuration="2.31255652s" podCreationTimestamp="2025-11-22 10:59:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 10:59:44.304028055 +0000 UTC m=+1204.605633342" watchObservedRunningTime="2025-11-22 10:59:44.31255652 +0000 UTC m=+1204.614161807" Nov 22 10:59:44 crc kubenswrapper[4926]: I1122 10:59:44.484073 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-5c7b6c5df9-ptcmn"] Nov 22 10:59:44 crc kubenswrapper[4926]: I1122 10:59:44.486684 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5c7b6c5df9-ptcmn" Nov 22 10:59:44 crc kubenswrapper[4926]: I1122 10:59:44.543434 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5c7b6c5df9-ptcmn"] Nov 22 10:59:44 crc kubenswrapper[4926]: I1122 10:59:44.599716 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1ad9946b-4e9e-4366-9a71-da0887c1b083" path="/var/lib/kubelet/pods/1ad9946b-4e9e-4366-9a71-da0887c1b083/volumes" Nov 22 10:59:44 crc kubenswrapper[4926]: I1122 10:59:44.612869 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/13ac548d-31ed-4cec-b356-ce3ae008af91-ovsdbserver-sb\") pod \"dnsmasq-dns-5c7b6c5df9-ptcmn\" (UID: \"13ac548d-31ed-4cec-b356-ce3ae008af91\") " pod="openstack/dnsmasq-dns-5c7b6c5df9-ptcmn" Nov 22 10:59:44 crc kubenswrapper[4926]: I1122 10:59:44.613086 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/13ac548d-31ed-4cec-b356-ce3ae008af91-dns-svc\") pod \"dnsmasq-dns-5c7b6c5df9-ptcmn\" (UID: \"13ac548d-31ed-4cec-b356-ce3ae008af91\") " pod="openstack/dnsmasq-dns-5c7b6c5df9-ptcmn" Nov 22 10:59:44 crc kubenswrapper[4926]: I1122 10:59:44.613292 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zr59j\" (UniqueName: \"kubernetes.io/projected/13ac548d-31ed-4cec-b356-ce3ae008af91-kube-api-access-zr59j\") pod \"dnsmasq-dns-5c7b6c5df9-ptcmn\" (UID: \"13ac548d-31ed-4cec-b356-ce3ae008af91\") " pod="openstack/dnsmasq-dns-5c7b6c5df9-ptcmn" Nov 22 10:59:44 crc kubenswrapper[4926]: I1122 10:59:44.613469 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/13ac548d-31ed-4cec-b356-ce3ae008af91-config\") pod \"dnsmasq-dns-5c7b6c5df9-ptcmn\" (UID: \"13ac548d-31ed-4cec-b356-ce3ae008af91\") " pod="openstack/dnsmasq-dns-5c7b6c5df9-ptcmn" Nov 22 10:59:44 crc kubenswrapper[4926]: I1122 10:59:44.613560 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/13ac548d-31ed-4cec-b356-ce3ae008af91-ovsdbserver-nb\") pod \"dnsmasq-dns-5c7b6c5df9-ptcmn\" (UID: \"13ac548d-31ed-4cec-b356-ce3ae008af91\") " pod="openstack/dnsmasq-dns-5c7b6c5df9-ptcmn" Nov 22 10:59:44 crc kubenswrapper[4926]: I1122 10:59:44.613640 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/13ac548d-31ed-4cec-b356-ce3ae008af91-dns-swift-storage-0\") pod \"dnsmasq-dns-5c7b6c5df9-ptcmn\" (UID: \"13ac548d-31ed-4cec-b356-ce3ae008af91\") " pod="openstack/dnsmasq-dns-5c7b6c5df9-ptcmn" Nov 22 10:59:44 crc kubenswrapper[4926]: I1122 10:59:44.714693 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/13ac548d-31ed-4cec-b356-ce3ae008af91-ovsdbserver-nb\") pod \"dnsmasq-dns-5c7b6c5df9-ptcmn\" (UID: \"13ac548d-31ed-4cec-b356-ce3ae008af91\") " pod="openstack/dnsmasq-dns-5c7b6c5df9-ptcmn" Nov 22 10:59:44 crc kubenswrapper[4926]: I1122 10:59:44.715214 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: 
\"kubernetes.io/configmap/13ac548d-31ed-4cec-b356-ce3ae008af91-dns-swift-storage-0\") pod \"dnsmasq-dns-5c7b6c5df9-ptcmn\" (UID: \"13ac548d-31ed-4cec-b356-ce3ae008af91\") " pod="openstack/dnsmasq-dns-5c7b6c5df9-ptcmn" Nov 22 10:59:44 crc kubenswrapper[4926]: I1122 10:59:44.715287 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/13ac548d-31ed-4cec-b356-ce3ae008af91-ovsdbserver-sb\") pod \"dnsmasq-dns-5c7b6c5df9-ptcmn\" (UID: \"13ac548d-31ed-4cec-b356-ce3ae008af91\") " pod="openstack/dnsmasq-dns-5c7b6c5df9-ptcmn" Nov 22 10:59:44 crc kubenswrapper[4926]: I1122 10:59:44.715372 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/13ac548d-31ed-4cec-b356-ce3ae008af91-dns-svc\") pod \"dnsmasq-dns-5c7b6c5df9-ptcmn\" (UID: \"13ac548d-31ed-4cec-b356-ce3ae008af91\") " pod="openstack/dnsmasq-dns-5c7b6c5df9-ptcmn" Nov 22 10:59:44 crc kubenswrapper[4926]: I1122 10:59:44.715427 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zr59j\" (UniqueName: \"kubernetes.io/projected/13ac548d-31ed-4cec-b356-ce3ae008af91-kube-api-access-zr59j\") pod \"dnsmasq-dns-5c7b6c5df9-ptcmn\" (UID: \"13ac548d-31ed-4cec-b356-ce3ae008af91\") " pod="openstack/dnsmasq-dns-5c7b6c5df9-ptcmn" Nov 22 10:59:44 crc kubenswrapper[4926]: I1122 10:59:44.715479 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/13ac548d-31ed-4cec-b356-ce3ae008af91-config\") pod \"dnsmasq-dns-5c7b6c5df9-ptcmn\" (UID: \"13ac548d-31ed-4cec-b356-ce3ae008af91\") " pod="openstack/dnsmasq-dns-5c7b6c5df9-ptcmn" Nov 22 10:59:44 crc kubenswrapper[4926]: I1122 10:59:44.716799 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/13ac548d-31ed-4cec-b356-ce3ae008af91-ovsdbserver-sb\") pod \"dnsmasq-dns-5c7b6c5df9-ptcmn\" (UID: \"13ac548d-31ed-4cec-b356-ce3ae008af91\") " pod="openstack/dnsmasq-dns-5c7b6c5df9-ptcmn" Nov 22 10:59:44 crc kubenswrapper[4926]: I1122 10:59:44.717128 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/13ac548d-31ed-4cec-b356-ce3ae008af91-dns-swift-storage-0\") pod \"dnsmasq-dns-5c7b6c5df9-ptcmn\" (UID: \"13ac548d-31ed-4cec-b356-ce3ae008af91\") " pod="openstack/dnsmasq-dns-5c7b6c5df9-ptcmn" Nov 22 10:59:44 crc kubenswrapper[4926]: I1122 10:59:44.717960 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/13ac548d-31ed-4cec-b356-ce3ae008af91-ovsdbserver-nb\") pod \"dnsmasq-dns-5c7b6c5df9-ptcmn\" (UID: \"13ac548d-31ed-4cec-b356-ce3ae008af91\") " pod="openstack/dnsmasq-dns-5c7b6c5df9-ptcmn" Nov 22 10:59:44 crc kubenswrapper[4926]: I1122 10:59:44.718262 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/13ac548d-31ed-4cec-b356-ce3ae008af91-dns-svc\") pod \"dnsmasq-dns-5c7b6c5df9-ptcmn\" (UID: \"13ac548d-31ed-4cec-b356-ce3ae008af91\") " pod="openstack/dnsmasq-dns-5c7b6c5df9-ptcmn" Nov 22 10:59:44 crc kubenswrapper[4926]: I1122 10:59:44.718373 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/13ac548d-31ed-4cec-b356-ce3ae008af91-config\") pod \"dnsmasq-dns-5c7b6c5df9-ptcmn\" (UID: 
\"13ac548d-31ed-4cec-b356-ce3ae008af91\") " pod="openstack/dnsmasq-dns-5c7b6c5df9-ptcmn" Nov 22 10:59:44 crc kubenswrapper[4926]: I1122 10:59:44.736554 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zr59j\" (UniqueName: \"kubernetes.io/projected/13ac548d-31ed-4cec-b356-ce3ae008af91-kube-api-access-zr59j\") pod \"dnsmasq-dns-5c7b6c5df9-ptcmn\" (UID: \"13ac548d-31ed-4cec-b356-ce3ae008af91\") " pod="openstack/dnsmasq-dns-5c7b6c5df9-ptcmn" Nov 22 10:59:44 crc kubenswrapper[4926]: I1122 10:59:44.820555 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5c7b6c5df9-ptcmn" Nov 22 10:59:45 crc kubenswrapper[4926]: I1122 10:59:45.339245 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5c7b6c5df9-ptcmn"] Nov 22 10:59:45 crc kubenswrapper[4926]: W1122 10:59:45.340348 4926 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod13ac548d_31ed_4cec_b356_ce3ae008af91.slice/crio-d39b1a50901da18eee1d77d29ffde4cb3461badebfb56cec4b6cbcd1530efff9 WatchSource:0}: Error finding container d39b1a50901da18eee1d77d29ffde4cb3461badebfb56cec4b6cbcd1530efff9: Status 404 returned error can't find the container with id d39b1a50901da18eee1d77d29ffde4cb3461badebfb56cec4b6cbcd1530efff9 Nov 22 10:59:46 crc kubenswrapper[4926]: I1122 10:59:46.313232 4926 generic.go:334] "Generic (PLEG): container finished" podID="13ac548d-31ed-4cec-b356-ce3ae008af91" containerID="be3e1bca5376041bea116cbf58fce7031039203940cbd4075d97ea38c45c9f78" exitCode=0 Nov 22 10:59:46 crc kubenswrapper[4926]: I1122 10:59:46.313317 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5c7b6c5df9-ptcmn" event={"ID":"13ac548d-31ed-4cec-b356-ce3ae008af91","Type":"ContainerDied","Data":"be3e1bca5376041bea116cbf58fce7031039203940cbd4075d97ea38c45c9f78"} Nov 22 10:59:46 crc kubenswrapper[4926]: I1122 10:59:46.313842 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5c7b6c5df9-ptcmn" event={"ID":"13ac548d-31ed-4cec-b356-ce3ae008af91","Type":"ContainerStarted","Data":"d39b1a50901da18eee1d77d29ffde4cb3461badebfb56cec4b6cbcd1530efff9"} Nov 22 10:59:46 crc kubenswrapper[4926]: I1122 10:59:46.389821 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 22 10:59:46 crc kubenswrapper[4926]: I1122 10:59:46.392440 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="131956d4-b7eb-472a-bfb0-e714ee99aa5a" containerName="ceilometer-central-agent" containerID="cri-o://27086137beba520f2fb742ba30c166dd964e65f9a8cd87f3f971ab212e37e9ca" gracePeriod=30 Nov 22 10:59:46 crc kubenswrapper[4926]: I1122 10:59:46.392698 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="131956d4-b7eb-472a-bfb0-e714ee99aa5a" containerName="proxy-httpd" containerID="cri-o://faa6b14e99d65e34d5e924cc130597e1e2fdadfe8c78f1e599415ff698a18d99" gracePeriod=30 Nov 22 10:59:46 crc kubenswrapper[4926]: I1122 10:59:46.392756 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="131956d4-b7eb-472a-bfb0-e714ee99aa5a" containerName="sg-core" containerID="cri-o://c4bf729be5df818cc28bfb76ef6b4e94821627f682576602e63f10094eafac49" gracePeriod=30 Nov 22 10:59:46 crc kubenswrapper[4926]: I1122 10:59:46.392803 4926 kuberuntime_container.go:808] "Killing 
container with a grace period" pod="openstack/ceilometer-0" podUID="131956d4-b7eb-472a-bfb0-e714ee99aa5a" containerName="ceilometer-notification-agent" containerID="cri-o://cc2df8fcb3c51fc96cc6c6afb171fa6019bfa336e8ed0d2ca02b3af384e283ad" gracePeriod=30 Nov 22 10:59:46 crc kubenswrapper[4926]: I1122 10:59:46.501013 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ceilometer-0" podUID="131956d4-b7eb-472a-bfb0-e714ee99aa5a" containerName="proxy-httpd" probeResult="failure" output="Get \"https://10.217.0.199:3000/\": read tcp 10.217.0.2:42600->10.217.0.199:3000: read: connection reset by peer" Nov 22 10:59:46 crc kubenswrapper[4926]: I1122 10:59:46.839056 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Nov 22 10:59:47 crc kubenswrapper[4926]: I1122 10:59:47.325921 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5c7b6c5df9-ptcmn" event={"ID":"13ac548d-31ed-4cec-b356-ce3ae008af91","Type":"ContainerStarted","Data":"3253ee51a2d93777d0d723a625a9c2475f68d2bffff1f5d64381f55454a78915"} Nov 22 10:59:47 crc kubenswrapper[4926]: I1122 10:59:47.326261 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-5c7b6c5df9-ptcmn" Nov 22 10:59:47 crc kubenswrapper[4926]: I1122 10:59:47.328812 4926 generic.go:334] "Generic (PLEG): container finished" podID="131956d4-b7eb-472a-bfb0-e714ee99aa5a" containerID="faa6b14e99d65e34d5e924cc130597e1e2fdadfe8c78f1e599415ff698a18d99" exitCode=0 Nov 22 10:59:47 crc kubenswrapper[4926]: I1122 10:59:47.328845 4926 generic.go:334] "Generic (PLEG): container finished" podID="131956d4-b7eb-472a-bfb0-e714ee99aa5a" containerID="c4bf729be5df818cc28bfb76ef6b4e94821627f682576602e63f10094eafac49" exitCode=2 Nov 22 10:59:47 crc kubenswrapper[4926]: I1122 10:59:47.328856 4926 generic.go:334] "Generic (PLEG): container finished" podID="131956d4-b7eb-472a-bfb0-e714ee99aa5a" containerID="27086137beba520f2fb742ba30c166dd964e65f9a8cd87f3f971ab212e37e9ca" exitCode=0 Nov 22 10:59:47 crc kubenswrapper[4926]: I1122 10:59:47.328904 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"131956d4-b7eb-472a-bfb0-e714ee99aa5a","Type":"ContainerDied","Data":"faa6b14e99d65e34d5e924cc130597e1e2fdadfe8c78f1e599415ff698a18d99"} Nov 22 10:59:47 crc kubenswrapper[4926]: I1122 10:59:47.328942 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"131956d4-b7eb-472a-bfb0-e714ee99aa5a","Type":"ContainerDied","Data":"c4bf729be5df818cc28bfb76ef6b4e94821627f682576602e63f10094eafac49"} Nov 22 10:59:47 crc kubenswrapper[4926]: I1122 10:59:47.328956 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"131956d4-b7eb-472a-bfb0-e714ee99aa5a","Type":"ContainerDied","Data":"27086137beba520f2fb742ba30c166dd964e65f9a8cd87f3f971ab212e37e9ca"} Nov 22 10:59:47 crc kubenswrapper[4926]: I1122 10:59:47.329079 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="c44886b1-1d10-4c9a-b13e-9e53d75e978b" containerName="nova-api-log" containerID="cri-o://aea53ef6e98a5aeb4198c93e7ecb1c2f0d4fb3ab7c9704c5b9b9723337330396" gracePeriod=30 Nov 22 10:59:47 crc kubenswrapper[4926]: I1122 10:59:47.329114 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="c44886b1-1d10-4c9a-b13e-9e53d75e978b" containerName="nova-api-api" 
containerID="cri-o://276bd1d32e8322dc979982495afa86b51d567c604dc4c9f074b7ffdc47e6ee36" gracePeriod=30 Nov 22 10:59:47 crc kubenswrapper[4926]: I1122 10:59:47.356418 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-5c7b6c5df9-ptcmn" podStartSLOduration=3.356398139 podStartE2EDuration="3.356398139s" podCreationTimestamp="2025-11-22 10:59:44 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 10:59:47.345840897 +0000 UTC m=+1207.647446194" watchObservedRunningTime="2025-11-22 10:59:47.356398139 +0000 UTC m=+1207.658003426" Nov 22 10:59:47 crc kubenswrapper[4926]: I1122 10:59:47.965484 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-novncproxy-0" Nov 22 10:59:48 crc kubenswrapper[4926]: I1122 10:59:48.370805 4926 generic.go:334] "Generic (PLEG): container finished" podID="c44886b1-1d10-4c9a-b13e-9e53d75e978b" containerID="aea53ef6e98a5aeb4198c93e7ecb1c2f0d4fb3ab7c9704c5b9b9723337330396" exitCode=143 Nov 22 10:59:48 crc kubenswrapper[4926]: I1122 10:59:48.371745 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"c44886b1-1d10-4c9a-b13e-9e53d75e978b","Type":"ContainerDied","Data":"aea53ef6e98a5aeb4198c93e7ecb1c2f0d4fb3ab7c9704c5b9b9723337330396"} Nov 22 10:59:49 crc kubenswrapper[4926]: I1122 10:59:49.878965 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 22 10:59:49 crc kubenswrapper[4926]: I1122 10:59:49.914320 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/131956d4-b7eb-472a-bfb0-e714ee99aa5a-scripts\") pod \"131956d4-b7eb-472a-bfb0-e714ee99aa5a\" (UID: \"131956d4-b7eb-472a-bfb0-e714ee99aa5a\") " Nov 22 10:59:49 crc kubenswrapper[4926]: I1122 10:59:49.914393 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/131956d4-b7eb-472a-bfb0-e714ee99aa5a-log-httpd\") pod \"131956d4-b7eb-472a-bfb0-e714ee99aa5a\" (UID: \"131956d4-b7eb-472a-bfb0-e714ee99aa5a\") " Nov 22 10:59:49 crc kubenswrapper[4926]: I1122 10:59:49.914482 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/131956d4-b7eb-472a-bfb0-e714ee99aa5a-sg-core-conf-yaml\") pod \"131956d4-b7eb-472a-bfb0-e714ee99aa5a\" (UID: \"131956d4-b7eb-472a-bfb0-e714ee99aa5a\") " Nov 22 10:59:49 crc kubenswrapper[4926]: I1122 10:59:49.914561 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/131956d4-b7eb-472a-bfb0-e714ee99aa5a-run-httpd\") pod \"131956d4-b7eb-472a-bfb0-e714ee99aa5a\" (UID: \"131956d4-b7eb-472a-bfb0-e714ee99aa5a\") " Nov 22 10:59:49 crc kubenswrapper[4926]: I1122 10:59:49.914668 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/131956d4-b7eb-472a-bfb0-e714ee99aa5a-ceilometer-tls-certs\") pod \"131956d4-b7eb-472a-bfb0-e714ee99aa5a\" (UID: \"131956d4-b7eb-472a-bfb0-e714ee99aa5a\") " Nov 22 10:59:49 crc kubenswrapper[4926]: I1122 10:59:49.914691 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: 
\"kubernetes.io/secret/131956d4-b7eb-472a-bfb0-e714ee99aa5a-config-data\") pod \"131956d4-b7eb-472a-bfb0-e714ee99aa5a\" (UID: \"131956d4-b7eb-472a-bfb0-e714ee99aa5a\") " Nov 22 10:59:49 crc kubenswrapper[4926]: I1122 10:59:49.914707 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-l2hq2\" (UniqueName: \"kubernetes.io/projected/131956d4-b7eb-472a-bfb0-e714ee99aa5a-kube-api-access-l2hq2\") pod \"131956d4-b7eb-472a-bfb0-e714ee99aa5a\" (UID: \"131956d4-b7eb-472a-bfb0-e714ee99aa5a\") " Nov 22 10:59:49 crc kubenswrapper[4926]: I1122 10:59:49.914774 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/131956d4-b7eb-472a-bfb0-e714ee99aa5a-combined-ca-bundle\") pod \"131956d4-b7eb-472a-bfb0-e714ee99aa5a\" (UID: \"131956d4-b7eb-472a-bfb0-e714ee99aa5a\") " Nov 22 10:59:49 crc kubenswrapper[4926]: I1122 10:59:49.915032 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/131956d4-b7eb-472a-bfb0-e714ee99aa5a-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "131956d4-b7eb-472a-bfb0-e714ee99aa5a" (UID: "131956d4-b7eb-472a-bfb0-e714ee99aa5a"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 10:59:49 crc kubenswrapper[4926]: I1122 10:59:49.915206 4926 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/131956d4-b7eb-472a-bfb0-e714ee99aa5a-log-httpd\") on node \"crc\" DevicePath \"\"" Nov 22 10:59:49 crc kubenswrapper[4926]: I1122 10:59:49.915275 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/131956d4-b7eb-472a-bfb0-e714ee99aa5a-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "131956d4-b7eb-472a-bfb0-e714ee99aa5a" (UID: "131956d4-b7eb-472a-bfb0-e714ee99aa5a"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 10:59:49 crc kubenswrapper[4926]: I1122 10:59:49.920274 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/131956d4-b7eb-472a-bfb0-e714ee99aa5a-scripts" (OuterVolumeSpecName: "scripts") pod "131956d4-b7eb-472a-bfb0-e714ee99aa5a" (UID: "131956d4-b7eb-472a-bfb0-e714ee99aa5a"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:59:49 crc kubenswrapper[4926]: I1122 10:59:49.927321 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/131956d4-b7eb-472a-bfb0-e714ee99aa5a-kube-api-access-l2hq2" (OuterVolumeSpecName: "kube-api-access-l2hq2") pod "131956d4-b7eb-472a-bfb0-e714ee99aa5a" (UID: "131956d4-b7eb-472a-bfb0-e714ee99aa5a"). InnerVolumeSpecName "kube-api-access-l2hq2". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:59:49 crc kubenswrapper[4926]: I1122 10:59:49.948432 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/131956d4-b7eb-472a-bfb0-e714ee99aa5a-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "131956d4-b7eb-472a-bfb0-e714ee99aa5a" (UID: "131956d4-b7eb-472a-bfb0-e714ee99aa5a"). InnerVolumeSpecName "sg-core-conf-yaml". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:59:49 crc kubenswrapper[4926]: I1122 10:59:49.978861 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/131956d4-b7eb-472a-bfb0-e714ee99aa5a-ceilometer-tls-certs" (OuterVolumeSpecName: "ceilometer-tls-certs") pod "131956d4-b7eb-472a-bfb0-e714ee99aa5a" (UID: "131956d4-b7eb-472a-bfb0-e714ee99aa5a"). InnerVolumeSpecName "ceilometer-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:59:50 crc kubenswrapper[4926]: I1122 10:59:50.016434 4926 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/131956d4-b7eb-472a-bfb0-e714ee99aa5a-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Nov 22 10:59:50 crc kubenswrapper[4926]: I1122 10:59:50.016621 4926 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/131956d4-b7eb-472a-bfb0-e714ee99aa5a-run-httpd\") on node \"crc\" DevicePath \"\"" Nov 22 10:59:50 crc kubenswrapper[4926]: I1122 10:59:50.016685 4926 reconciler_common.go:293] "Volume detached for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/131956d4-b7eb-472a-bfb0-e714ee99aa5a-ceilometer-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 22 10:59:50 crc kubenswrapper[4926]: I1122 10:59:50.016774 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-l2hq2\" (UniqueName: \"kubernetes.io/projected/131956d4-b7eb-472a-bfb0-e714ee99aa5a-kube-api-access-l2hq2\") on node \"crc\" DevicePath \"\"" Nov 22 10:59:50 crc kubenswrapper[4926]: I1122 10:59:50.016837 4926 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/131956d4-b7eb-472a-bfb0-e714ee99aa5a-scripts\") on node \"crc\" DevicePath \"\"" Nov 22 10:59:50 crc kubenswrapper[4926]: I1122 10:59:50.019779 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/131956d4-b7eb-472a-bfb0-e714ee99aa5a-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "131956d4-b7eb-472a-bfb0-e714ee99aa5a" (UID: "131956d4-b7eb-472a-bfb0-e714ee99aa5a"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:59:50 crc kubenswrapper[4926]: I1122 10:59:50.036192 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/131956d4-b7eb-472a-bfb0-e714ee99aa5a-config-data" (OuterVolumeSpecName: "config-data") pod "131956d4-b7eb-472a-bfb0-e714ee99aa5a" (UID: "131956d4-b7eb-472a-bfb0-e714ee99aa5a"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:59:50 crc kubenswrapper[4926]: I1122 10:59:50.118291 4926 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/131956d4-b7eb-472a-bfb0-e714ee99aa5a-config-data\") on node \"crc\" DevicePath \"\"" Nov 22 10:59:50 crc kubenswrapper[4926]: I1122 10:59:50.118327 4926 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/131956d4-b7eb-472a-bfb0-e714ee99aa5a-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 22 10:59:50 crc kubenswrapper[4926]: I1122 10:59:50.390238 4926 generic.go:334] "Generic (PLEG): container finished" podID="131956d4-b7eb-472a-bfb0-e714ee99aa5a" containerID="cc2df8fcb3c51fc96cc6c6afb171fa6019bfa336e8ed0d2ca02b3af384e283ad" exitCode=0 Nov 22 10:59:50 crc kubenswrapper[4926]: I1122 10:59:50.390286 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"131956d4-b7eb-472a-bfb0-e714ee99aa5a","Type":"ContainerDied","Data":"cc2df8fcb3c51fc96cc6c6afb171fa6019bfa336e8ed0d2ca02b3af384e283ad"} Nov 22 10:59:50 crc kubenswrapper[4926]: I1122 10:59:50.390316 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"131956d4-b7eb-472a-bfb0-e714ee99aa5a","Type":"ContainerDied","Data":"99424beed41139b6af4b6dcb5729c4a399a52f7c4d29aa3be3963c2a4d248b48"} Nov 22 10:59:50 crc kubenswrapper[4926]: I1122 10:59:50.390325 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 22 10:59:50 crc kubenswrapper[4926]: I1122 10:59:50.390335 4926 scope.go:117] "RemoveContainer" containerID="faa6b14e99d65e34d5e924cc130597e1e2fdadfe8c78f1e599415ff698a18d99" Nov 22 10:59:50 crc kubenswrapper[4926]: I1122 10:59:50.424141 4926 scope.go:117] "RemoveContainer" containerID="c4bf729be5df818cc28bfb76ef6b4e94821627f682576602e63f10094eafac49" Nov 22 10:59:50 crc kubenswrapper[4926]: I1122 10:59:50.424947 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 22 10:59:50 crc kubenswrapper[4926]: I1122 10:59:50.437952 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Nov 22 10:59:50 crc kubenswrapper[4926]: I1122 10:59:50.445465 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 22 10:59:50 crc kubenswrapper[4926]: E1122 10:59:50.445948 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="131956d4-b7eb-472a-bfb0-e714ee99aa5a" containerName="proxy-httpd" Nov 22 10:59:50 crc kubenswrapper[4926]: I1122 10:59:50.445975 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="131956d4-b7eb-472a-bfb0-e714ee99aa5a" containerName="proxy-httpd" Nov 22 10:59:50 crc kubenswrapper[4926]: E1122 10:59:50.446017 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="131956d4-b7eb-472a-bfb0-e714ee99aa5a" containerName="sg-core" Nov 22 10:59:50 crc kubenswrapper[4926]: I1122 10:59:50.446026 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="131956d4-b7eb-472a-bfb0-e714ee99aa5a" containerName="sg-core" Nov 22 10:59:50 crc kubenswrapper[4926]: E1122 10:59:50.446043 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="131956d4-b7eb-472a-bfb0-e714ee99aa5a" containerName="ceilometer-central-agent" Nov 22 10:59:50 crc kubenswrapper[4926]: I1122 10:59:50.446052 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="131956d4-b7eb-472a-bfb0-e714ee99aa5a" 
containerName="ceilometer-central-agent" Nov 22 10:59:50 crc kubenswrapper[4926]: E1122 10:59:50.446069 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="131956d4-b7eb-472a-bfb0-e714ee99aa5a" containerName="ceilometer-notification-agent" Nov 22 10:59:50 crc kubenswrapper[4926]: I1122 10:59:50.446077 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="131956d4-b7eb-472a-bfb0-e714ee99aa5a" containerName="ceilometer-notification-agent" Nov 22 10:59:50 crc kubenswrapper[4926]: I1122 10:59:50.446276 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="131956d4-b7eb-472a-bfb0-e714ee99aa5a" containerName="proxy-httpd" Nov 22 10:59:50 crc kubenswrapper[4926]: I1122 10:59:50.446304 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="131956d4-b7eb-472a-bfb0-e714ee99aa5a" containerName="sg-core" Nov 22 10:59:50 crc kubenswrapper[4926]: I1122 10:59:50.446324 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="131956d4-b7eb-472a-bfb0-e714ee99aa5a" containerName="ceilometer-central-agent" Nov 22 10:59:50 crc kubenswrapper[4926]: I1122 10:59:50.446335 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="131956d4-b7eb-472a-bfb0-e714ee99aa5a" containerName="ceilometer-notification-agent" Nov 22 10:59:50 crc kubenswrapper[4926]: I1122 10:59:50.448541 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 22 10:59:50 crc kubenswrapper[4926]: I1122 10:59:50.452299 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ceilometer-internal-svc" Nov 22 10:59:50 crc kubenswrapper[4926]: I1122 10:59:50.452549 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 22 10:59:50 crc kubenswrapper[4926]: I1122 10:59:50.454012 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 22 10:59:50 crc kubenswrapper[4926]: I1122 10:59:50.464358 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 22 10:59:50 crc kubenswrapper[4926]: I1122 10:59:50.471112 4926 scope.go:117] "RemoveContainer" containerID="cc2df8fcb3c51fc96cc6c6afb171fa6019bfa336e8ed0d2ca02b3af384e283ad" Nov 22 10:59:50 crc kubenswrapper[4926]: I1122 10:59:50.499721 4926 scope.go:117] "RemoveContainer" containerID="27086137beba520f2fb742ba30c166dd964e65f9a8cd87f3f971ab212e37e9ca" Nov 22 10:59:50 crc kubenswrapper[4926]: I1122 10:59:50.519987 4926 scope.go:117] "RemoveContainer" containerID="faa6b14e99d65e34d5e924cc130597e1e2fdadfe8c78f1e599415ff698a18d99" Nov 22 10:59:50 crc kubenswrapper[4926]: E1122 10:59:50.520511 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"faa6b14e99d65e34d5e924cc130597e1e2fdadfe8c78f1e599415ff698a18d99\": container with ID starting with faa6b14e99d65e34d5e924cc130597e1e2fdadfe8c78f1e599415ff698a18d99 not found: ID does not exist" containerID="faa6b14e99d65e34d5e924cc130597e1e2fdadfe8c78f1e599415ff698a18d99" Nov 22 10:59:50 crc kubenswrapper[4926]: I1122 10:59:50.520552 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"faa6b14e99d65e34d5e924cc130597e1e2fdadfe8c78f1e599415ff698a18d99"} err="failed to get container status \"faa6b14e99d65e34d5e924cc130597e1e2fdadfe8c78f1e599415ff698a18d99\": rpc error: code = NotFound desc = could not find container 
\"faa6b14e99d65e34d5e924cc130597e1e2fdadfe8c78f1e599415ff698a18d99\": container with ID starting with faa6b14e99d65e34d5e924cc130597e1e2fdadfe8c78f1e599415ff698a18d99 not found: ID does not exist" Nov 22 10:59:50 crc kubenswrapper[4926]: I1122 10:59:50.520582 4926 scope.go:117] "RemoveContainer" containerID="c4bf729be5df818cc28bfb76ef6b4e94821627f682576602e63f10094eafac49" Nov 22 10:59:50 crc kubenswrapper[4926]: E1122 10:59:50.520928 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c4bf729be5df818cc28bfb76ef6b4e94821627f682576602e63f10094eafac49\": container with ID starting with c4bf729be5df818cc28bfb76ef6b4e94821627f682576602e63f10094eafac49 not found: ID does not exist" containerID="c4bf729be5df818cc28bfb76ef6b4e94821627f682576602e63f10094eafac49" Nov 22 10:59:50 crc kubenswrapper[4926]: I1122 10:59:50.520966 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c4bf729be5df818cc28bfb76ef6b4e94821627f682576602e63f10094eafac49"} err="failed to get container status \"c4bf729be5df818cc28bfb76ef6b4e94821627f682576602e63f10094eafac49\": rpc error: code = NotFound desc = could not find container \"c4bf729be5df818cc28bfb76ef6b4e94821627f682576602e63f10094eafac49\": container with ID starting with c4bf729be5df818cc28bfb76ef6b4e94821627f682576602e63f10094eafac49 not found: ID does not exist" Nov 22 10:59:50 crc kubenswrapper[4926]: I1122 10:59:50.520993 4926 scope.go:117] "RemoveContainer" containerID="cc2df8fcb3c51fc96cc6c6afb171fa6019bfa336e8ed0d2ca02b3af384e283ad" Nov 22 10:59:50 crc kubenswrapper[4926]: E1122 10:59:50.521283 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"cc2df8fcb3c51fc96cc6c6afb171fa6019bfa336e8ed0d2ca02b3af384e283ad\": container with ID starting with cc2df8fcb3c51fc96cc6c6afb171fa6019bfa336e8ed0d2ca02b3af384e283ad not found: ID does not exist" containerID="cc2df8fcb3c51fc96cc6c6afb171fa6019bfa336e8ed0d2ca02b3af384e283ad" Nov 22 10:59:50 crc kubenswrapper[4926]: I1122 10:59:50.521326 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cc2df8fcb3c51fc96cc6c6afb171fa6019bfa336e8ed0d2ca02b3af384e283ad"} err="failed to get container status \"cc2df8fcb3c51fc96cc6c6afb171fa6019bfa336e8ed0d2ca02b3af384e283ad\": rpc error: code = NotFound desc = could not find container \"cc2df8fcb3c51fc96cc6c6afb171fa6019bfa336e8ed0d2ca02b3af384e283ad\": container with ID starting with cc2df8fcb3c51fc96cc6c6afb171fa6019bfa336e8ed0d2ca02b3af384e283ad not found: ID does not exist" Nov 22 10:59:50 crc kubenswrapper[4926]: I1122 10:59:50.521353 4926 scope.go:117] "RemoveContainer" containerID="27086137beba520f2fb742ba30c166dd964e65f9a8cd87f3f971ab212e37e9ca" Nov 22 10:59:50 crc kubenswrapper[4926]: E1122 10:59:50.521673 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"27086137beba520f2fb742ba30c166dd964e65f9a8cd87f3f971ab212e37e9ca\": container with ID starting with 27086137beba520f2fb742ba30c166dd964e65f9a8cd87f3f971ab212e37e9ca not found: ID does not exist" containerID="27086137beba520f2fb742ba30c166dd964e65f9a8cd87f3f971ab212e37e9ca" Nov 22 10:59:50 crc kubenswrapper[4926]: I1122 10:59:50.521702 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"27086137beba520f2fb742ba30c166dd964e65f9a8cd87f3f971ab212e37e9ca"} 
err="failed to get container status \"27086137beba520f2fb742ba30c166dd964e65f9a8cd87f3f971ab212e37e9ca\": rpc error: code = NotFound desc = could not find container \"27086137beba520f2fb742ba30c166dd964e65f9a8cd87f3f971ab212e37e9ca\": container with ID starting with 27086137beba520f2fb742ba30c166dd964e65f9a8cd87f3f971ab212e37e9ca not found: ID does not exist" Nov 22 10:59:50 crc kubenswrapper[4926]: I1122 10:59:50.526019 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/5e5f48cd-d405-4431-8ab1-de058f7c0f52-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"5e5f48cd-d405-4431-8ab1-de058f7c0f52\") " pod="openstack/ceilometer-0" Nov 22 10:59:50 crc kubenswrapper[4926]: I1122 10:59:50.526175 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/5e5f48cd-d405-4431-8ab1-de058f7c0f52-run-httpd\") pod \"ceilometer-0\" (UID: \"5e5f48cd-d405-4431-8ab1-de058f7c0f52\") " pod="openstack/ceilometer-0" Nov 22 10:59:50 crc kubenswrapper[4926]: I1122 10:59:50.526226 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5e5f48cd-d405-4431-8ab1-de058f7c0f52-config-data\") pod \"ceilometer-0\" (UID: \"5e5f48cd-d405-4431-8ab1-de058f7c0f52\") " pod="openstack/ceilometer-0" Nov 22 10:59:50 crc kubenswrapper[4926]: I1122 10:59:50.526253 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-k9fbd\" (UniqueName: \"kubernetes.io/projected/5e5f48cd-d405-4431-8ab1-de058f7c0f52-kube-api-access-k9fbd\") pod \"ceilometer-0\" (UID: \"5e5f48cd-d405-4431-8ab1-de058f7c0f52\") " pod="openstack/ceilometer-0" Nov 22 10:59:50 crc kubenswrapper[4926]: I1122 10:59:50.526584 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/5e5f48cd-d405-4431-8ab1-de058f7c0f52-log-httpd\") pod \"ceilometer-0\" (UID: \"5e5f48cd-d405-4431-8ab1-de058f7c0f52\") " pod="openstack/ceilometer-0" Nov 22 10:59:50 crc kubenswrapper[4926]: I1122 10:59:50.526661 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5e5f48cd-d405-4431-8ab1-de058f7c0f52-scripts\") pod \"ceilometer-0\" (UID: \"5e5f48cd-d405-4431-8ab1-de058f7c0f52\") " pod="openstack/ceilometer-0" Nov 22 10:59:50 crc kubenswrapper[4926]: I1122 10:59:50.526739 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/5e5f48cd-d405-4431-8ab1-de058f7c0f52-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"5e5f48cd-d405-4431-8ab1-de058f7c0f52\") " pod="openstack/ceilometer-0" Nov 22 10:59:50 crc kubenswrapper[4926]: I1122 10:59:50.526839 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5e5f48cd-d405-4431-8ab1-de058f7c0f52-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"5e5f48cd-d405-4431-8ab1-de058f7c0f52\") " pod="openstack/ceilometer-0" Nov 22 10:59:50 crc kubenswrapper[4926]: I1122 10:59:50.600009 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="131956d4-b7eb-472a-bfb0-e714ee99aa5a" 
path="/var/lib/kubelet/pods/131956d4-b7eb-472a-bfb0-e714ee99aa5a/volumes" Nov 22 10:59:50 crc kubenswrapper[4926]: I1122 10:59:50.628178 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/5e5f48cd-d405-4431-8ab1-de058f7c0f52-log-httpd\") pod \"ceilometer-0\" (UID: \"5e5f48cd-d405-4431-8ab1-de058f7c0f52\") " pod="openstack/ceilometer-0" Nov 22 10:59:50 crc kubenswrapper[4926]: I1122 10:59:50.628238 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5e5f48cd-d405-4431-8ab1-de058f7c0f52-scripts\") pod \"ceilometer-0\" (UID: \"5e5f48cd-d405-4431-8ab1-de058f7c0f52\") " pod="openstack/ceilometer-0" Nov 22 10:59:50 crc kubenswrapper[4926]: I1122 10:59:50.628264 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/5e5f48cd-d405-4431-8ab1-de058f7c0f52-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"5e5f48cd-d405-4431-8ab1-de058f7c0f52\") " pod="openstack/ceilometer-0" Nov 22 10:59:50 crc kubenswrapper[4926]: I1122 10:59:50.628324 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5e5f48cd-d405-4431-8ab1-de058f7c0f52-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"5e5f48cd-d405-4431-8ab1-de058f7c0f52\") " pod="openstack/ceilometer-0" Nov 22 10:59:50 crc kubenswrapper[4926]: I1122 10:59:50.628372 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/5e5f48cd-d405-4431-8ab1-de058f7c0f52-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"5e5f48cd-d405-4431-8ab1-de058f7c0f52\") " pod="openstack/ceilometer-0" Nov 22 10:59:50 crc kubenswrapper[4926]: I1122 10:59:50.628517 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5e5f48cd-d405-4431-8ab1-de058f7c0f52-config-data\") pod \"ceilometer-0\" (UID: \"5e5f48cd-d405-4431-8ab1-de058f7c0f52\") " pod="openstack/ceilometer-0" Nov 22 10:59:50 crc kubenswrapper[4926]: I1122 10:59:50.628537 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/5e5f48cd-d405-4431-8ab1-de058f7c0f52-run-httpd\") pod \"ceilometer-0\" (UID: \"5e5f48cd-d405-4431-8ab1-de058f7c0f52\") " pod="openstack/ceilometer-0" Nov 22 10:59:50 crc kubenswrapper[4926]: I1122 10:59:50.628560 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-k9fbd\" (UniqueName: \"kubernetes.io/projected/5e5f48cd-d405-4431-8ab1-de058f7c0f52-kube-api-access-k9fbd\") pod \"ceilometer-0\" (UID: \"5e5f48cd-d405-4431-8ab1-de058f7c0f52\") " pod="openstack/ceilometer-0" Nov 22 10:59:50 crc kubenswrapper[4926]: I1122 10:59:50.628765 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/5e5f48cd-d405-4431-8ab1-de058f7c0f52-log-httpd\") pod \"ceilometer-0\" (UID: \"5e5f48cd-d405-4431-8ab1-de058f7c0f52\") " pod="openstack/ceilometer-0" Nov 22 10:59:50 crc kubenswrapper[4926]: I1122 10:59:50.629845 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/5e5f48cd-d405-4431-8ab1-de058f7c0f52-run-httpd\") pod \"ceilometer-0\" (UID: 
\"5e5f48cd-d405-4431-8ab1-de058f7c0f52\") " pod="openstack/ceilometer-0" Nov 22 10:59:50 crc kubenswrapper[4926]: I1122 10:59:50.633436 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/5e5f48cd-d405-4431-8ab1-de058f7c0f52-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"5e5f48cd-d405-4431-8ab1-de058f7c0f52\") " pod="openstack/ceilometer-0" Nov 22 10:59:50 crc kubenswrapper[4926]: I1122 10:59:50.634087 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5e5f48cd-d405-4431-8ab1-de058f7c0f52-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"5e5f48cd-d405-4431-8ab1-de058f7c0f52\") " pod="openstack/ceilometer-0" Nov 22 10:59:50 crc kubenswrapper[4926]: I1122 10:59:50.634836 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/5e5f48cd-d405-4431-8ab1-de058f7c0f52-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"5e5f48cd-d405-4431-8ab1-de058f7c0f52\") " pod="openstack/ceilometer-0" Nov 22 10:59:50 crc kubenswrapper[4926]: I1122 10:59:50.635309 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5e5f48cd-d405-4431-8ab1-de058f7c0f52-config-data\") pod \"ceilometer-0\" (UID: \"5e5f48cd-d405-4431-8ab1-de058f7c0f52\") " pod="openstack/ceilometer-0" Nov 22 10:59:50 crc kubenswrapper[4926]: I1122 10:59:50.652745 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5e5f48cd-d405-4431-8ab1-de058f7c0f52-scripts\") pod \"ceilometer-0\" (UID: \"5e5f48cd-d405-4431-8ab1-de058f7c0f52\") " pod="openstack/ceilometer-0" Nov 22 10:59:50 crc kubenswrapper[4926]: I1122 10:59:50.653679 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-k9fbd\" (UniqueName: \"kubernetes.io/projected/5e5f48cd-d405-4431-8ab1-de058f7c0f52-kube-api-access-k9fbd\") pod \"ceilometer-0\" (UID: \"5e5f48cd-d405-4431-8ab1-de058f7c0f52\") " pod="openstack/ceilometer-0" Nov 22 10:59:50 crc kubenswrapper[4926]: I1122 10:59:50.779982 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 22 10:59:50 crc kubenswrapper[4926]: I1122 10:59:50.912006 4926 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Nov 22 10:59:51 crc kubenswrapper[4926]: I1122 10:59:51.035636 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c44886b1-1d10-4c9a-b13e-9e53d75e978b-logs\") pod \"c44886b1-1d10-4c9a-b13e-9e53d75e978b\" (UID: \"c44886b1-1d10-4c9a-b13e-9e53d75e978b\") " Nov 22 10:59:51 crc kubenswrapper[4926]: I1122 10:59:51.035741 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c44886b1-1d10-4c9a-b13e-9e53d75e978b-config-data\") pod \"c44886b1-1d10-4c9a-b13e-9e53d75e978b\" (UID: \"c44886b1-1d10-4c9a-b13e-9e53d75e978b\") " Nov 22 10:59:51 crc kubenswrapper[4926]: I1122 10:59:51.035772 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c44886b1-1d10-4c9a-b13e-9e53d75e978b-combined-ca-bundle\") pod \"c44886b1-1d10-4c9a-b13e-9e53d75e978b\" (UID: \"c44886b1-1d10-4c9a-b13e-9e53d75e978b\") " Nov 22 10:59:51 crc kubenswrapper[4926]: I1122 10:59:51.035872 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-96jv6\" (UniqueName: \"kubernetes.io/projected/c44886b1-1d10-4c9a-b13e-9e53d75e978b-kube-api-access-96jv6\") pod \"c44886b1-1d10-4c9a-b13e-9e53d75e978b\" (UID: \"c44886b1-1d10-4c9a-b13e-9e53d75e978b\") " Nov 22 10:59:51 crc kubenswrapper[4926]: I1122 10:59:51.036411 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c44886b1-1d10-4c9a-b13e-9e53d75e978b-logs" (OuterVolumeSpecName: "logs") pod "c44886b1-1d10-4c9a-b13e-9e53d75e978b" (UID: "c44886b1-1d10-4c9a-b13e-9e53d75e978b"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 10:59:51 crc kubenswrapper[4926]: I1122 10:59:51.043033 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c44886b1-1d10-4c9a-b13e-9e53d75e978b-kube-api-access-96jv6" (OuterVolumeSpecName: "kube-api-access-96jv6") pod "c44886b1-1d10-4c9a-b13e-9e53d75e978b" (UID: "c44886b1-1d10-4c9a-b13e-9e53d75e978b"). InnerVolumeSpecName "kube-api-access-96jv6". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:59:51 crc kubenswrapper[4926]: I1122 10:59:51.088154 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c44886b1-1d10-4c9a-b13e-9e53d75e978b-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "c44886b1-1d10-4c9a-b13e-9e53d75e978b" (UID: "c44886b1-1d10-4c9a-b13e-9e53d75e978b"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:59:51 crc kubenswrapper[4926]: I1122 10:59:51.092094 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c44886b1-1d10-4c9a-b13e-9e53d75e978b-config-data" (OuterVolumeSpecName: "config-data") pod "c44886b1-1d10-4c9a-b13e-9e53d75e978b" (UID: "c44886b1-1d10-4c9a-b13e-9e53d75e978b"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:59:51 crc kubenswrapper[4926]: I1122 10:59:51.140800 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-96jv6\" (UniqueName: \"kubernetes.io/projected/c44886b1-1d10-4c9a-b13e-9e53d75e978b-kube-api-access-96jv6\") on node \"crc\" DevicePath \"\"" Nov 22 10:59:51 crc kubenswrapper[4926]: I1122 10:59:51.140847 4926 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c44886b1-1d10-4c9a-b13e-9e53d75e978b-logs\") on node \"crc\" DevicePath \"\"" Nov 22 10:59:51 crc kubenswrapper[4926]: I1122 10:59:51.140858 4926 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c44886b1-1d10-4c9a-b13e-9e53d75e978b-config-data\") on node \"crc\" DevicePath \"\"" Nov 22 10:59:51 crc kubenswrapper[4926]: I1122 10:59:51.140869 4926 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c44886b1-1d10-4c9a-b13e-9e53d75e978b-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 22 10:59:51 crc kubenswrapper[4926]: I1122 10:59:51.245321 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 22 10:59:51 crc kubenswrapper[4926]: W1122 10:59:51.251649 4926 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod5e5f48cd_d405_4431_8ab1_de058f7c0f52.slice/crio-f88d8ddd7789ae4a9aace3e6782878e1b7e3b8d2addc8f6db1872040eaef863e WatchSource:0}: Error finding container f88d8ddd7789ae4a9aace3e6782878e1b7e3b8d2addc8f6db1872040eaef863e: Status 404 returned error can't find the container with id f88d8ddd7789ae4a9aace3e6782878e1b7e3b8d2addc8f6db1872040eaef863e Nov 22 10:59:51 crc kubenswrapper[4926]: I1122 10:59:51.400356 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"5e5f48cd-d405-4431-8ab1-de058f7c0f52","Type":"ContainerStarted","Data":"f88d8ddd7789ae4a9aace3e6782878e1b7e3b8d2addc8f6db1872040eaef863e"} Nov 22 10:59:51 crc kubenswrapper[4926]: I1122 10:59:51.402656 4926 generic.go:334] "Generic (PLEG): container finished" podID="c44886b1-1d10-4c9a-b13e-9e53d75e978b" containerID="276bd1d32e8322dc979982495afa86b51d567c604dc4c9f074b7ffdc47e6ee36" exitCode=0 Nov 22 10:59:51 crc kubenswrapper[4926]: I1122 10:59:51.402726 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"c44886b1-1d10-4c9a-b13e-9e53d75e978b","Type":"ContainerDied","Data":"276bd1d32e8322dc979982495afa86b51d567c604dc4c9f074b7ffdc47e6ee36"} Nov 22 10:59:51 crc kubenswrapper[4926]: I1122 10:59:51.402768 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"c44886b1-1d10-4c9a-b13e-9e53d75e978b","Type":"ContainerDied","Data":"e543841382afad84aa8eb5fea36bd5642fd0aafd4a031e898bb7553cd9702a3d"} Nov 22 10:59:51 crc kubenswrapper[4926]: I1122 10:59:51.402765 4926 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Nov 22 10:59:51 crc kubenswrapper[4926]: I1122 10:59:51.402788 4926 scope.go:117] "RemoveContainer" containerID="276bd1d32e8322dc979982495afa86b51d567c604dc4c9f074b7ffdc47e6ee36" Nov 22 10:59:51 crc kubenswrapper[4926]: I1122 10:59:51.432467 4926 scope.go:117] "RemoveContainer" containerID="aea53ef6e98a5aeb4198c93e7ecb1c2f0d4fb3ab7c9704c5b9b9723337330396" Nov 22 10:59:51 crc kubenswrapper[4926]: I1122 10:59:51.445223 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Nov 22 10:59:51 crc kubenswrapper[4926]: I1122 10:59:51.462948 4926 scope.go:117] "RemoveContainer" containerID="276bd1d32e8322dc979982495afa86b51d567c604dc4c9f074b7ffdc47e6ee36" Nov 22 10:59:51 crc kubenswrapper[4926]: E1122 10:59:51.463605 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"276bd1d32e8322dc979982495afa86b51d567c604dc4c9f074b7ffdc47e6ee36\": container with ID starting with 276bd1d32e8322dc979982495afa86b51d567c604dc4c9f074b7ffdc47e6ee36 not found: ID does not exist" containerID="276bd1d32e8322dc979982495afa86b51d567c604dc4c9f074b7ffdc47e6ee36" Nov 22 10:59:51 crc kubenswrapper[4926]: I1122 10:59:51.463639 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"276bd1d32e8322dc979982495afa86b51d567c604dc4c9f074b7ffdc47e6ee36"} err="failed to get container status \"276bd1d32e8322dc979982495afa86b51d567c604dc4c9f074b7ffdc47e6ee36\": rpc error: code = NotFound desc = could not find container \"276bd1d32e8322dc979982495afa86b51d567c604dc4c9f074b7ffdc47e6ee36\": container with ID starting with 276bd1d32e8322dc979982495afa86b51d567c604dc4c9f074b7ffdc47e6ee36 not found: ID does not exist" Nov 22 10:59:51 crc kubenswrapper[4926]: I1122 10:59:51.463666 4926 scope.go:117] "RemoveContainer" containerID="aea53ef6e98a5aeb4198c93e7ecb1c2f0d4fb3ab7c9704c5b9b9723337330396" Nov 22 10:59:51 crc kubenswrapper[4926]: E1122 10:59:51.464082 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"aea53ef6e98a5aeb4198c93e7ecb1c2f0d4fb3ab7c9704c5b9b9723337330396\": container with ID starting with aea53ef6e98a5aeb4198c93e7ecb1c2f0d4fb3ab7c9704c5b9b9723337330396 not found: ID does not exist" containerID="aea53ef6e98a5aeb4198c93e7ecb1c2f0d4fb3ab7c9704c5b9b9723337330396" Nov 22 10:59:51 crc kubenswrapper[4926]: I1122 10:59:51.464121 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"aea53ef6e98a5aeb4198c93e7ecb1c2f0d4fb3ab7c9704c5b9b9723337330396"} err="failed to get container status \"aea53ef6e98a5aeb4198c93e7ecb1c2f0d4fb3ab7c9704c5b9b9723337330396\": rpc error: code = NotFound desc = could not find container \"aea53ef6e98a5aeb4198c93e7ecb1c2f0d4fb3ab7c9704c5b9b9723337330396\": container with ID starting with aea53ef6e98a5aeb4198c93e7ecb1c2f0d4fb3ab7c9704c5b9b9723337330396 not found: ID does not exist" Nov 22 10:59:51 crc kubenswrapper[4926]: I1122 10:59:51.467152 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"] Nov 22 10:59:51 crc kubenswrapper[4926]: I1122 10:59:51.474527 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Nov 22 10:59:51 crc kubenswrapper[4926]: E1122 10:59:51.474956 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c44886b1-1d10-4c9a-b13e-9e53d75e978b" containerName="nova-api-log" Nov 22 10:59:51 crc 
kubenswrapper[4926]: I1122 10:59:51.474973 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="c44886b1-1d10-4c9a-b13e-9e53d75e978b" containerName="nova-api-log" Nov 22 10:59:51 crc kubenswrapper[4926]: E1122 10:59:51.474991 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c44886b1-1d10-4c9a-b13e-9e53d75e978b" containerName="nova-api-api" Nov 22 10:59:51 crc kubenswrapper[4926]: I1122 10:59:51.474998 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="c44886b1-1d10-4c9a-b13e-9e53d75e978b" containerName="nova-api-api" Nov 22 10:59:51 crc kubenswrapper[4926]: I1122 10:59:51.475225 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="c44886b1-1d10-4c9a-b13e-9e53d75e978b" containerName="nova-api-log" Nov 22 10:59:51 crc kubenswrapper[4926]: I1122 10:59:51.475251 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="c44886b1-1d10-4c9a-b13e-9e53d75e978b" containerName="nova-api-api" Nov 22 10:59:51 crc kubenswrapper[4926]: I1122 10:59:51.476292 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 22 10:59:51 crc kubenswrapper[4926]: I1122 10:59:51.483265 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-internal-svc" Nov 22 10:59:51 crc kubenswrapper[4926]: I1122 10:59:51.483461 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-public-svc" Nov 22 10:59:51 crc kubenswrapper[4926]: I1122 10:59:51.483601 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Nov 22 10:59:51 crc kubenswrapper[4926]: I1122 10:59:51.494748 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 22 10:59:51 crc kubenswrapper[4926]: I1122 10:59:51.548064 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8k724\" (UniqueName: \"kubernetes.io/projected/4268c17c-3adb-4912-ae27-bc528e97a6c7-kube-api-access-8k724\") pod \"nova-api-0\" (UID: \"4268c17c-3adb-4912-ae27-bc528e97a6c7\") " pod="openstack/nova-api-0" Nov 22 10:59:51 crc kubenswrapper[4926]: I1122 10:59:51.548123 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4268c17c-3adb-4912-ae27-bc528e97a6c7-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"4268c17c-3adb-4912-ae27-bc528e97a6c7\") " pod="openstack/nova-api-0" Nov 22 10:59:51 crc kubenswrapper[4926]: I1122 10:59:51.548171 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4268c17c-3adb-4912-ae27-bc528e97a6c7-config-data\") pod \"nova-api-0\" (UID: \"4268c17c-3adb-4912-ae27-bc528e97a6c7\") " pod="openstack/nova-api-0" Nov 22 10:59:51 crc kubenswrapper[4926]: I1122 10:59:51.548219 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4268c17c-3adb-4912-ae27-bc528e97a6c7-logs\") pod \"nova-api-0\" (UID: \"4268c17c-3adb-4912-ae27-bc528e97a6c7\") " pod="openstack/nova-api-0" Nov 22 10:59:51 crc kubenswrapper[4926]: I1122 10:59:51.548251 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/4268c17c-3adb-4912-ae27-bc528e97a6c7-public-tls-certs\") pod \"nova-api-0\" (UID: 
\"4268c17c-3adb-4912-ae27-bc528e97a6c7\") " pod="openstack/nova-api-0" Nov 22 10:59:51 crc kubenswrapper[4926]: I1122 10:59:51.548288 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/4268c17c-3adb-4912-ae27-bc528e97a6c7-internal-tls-certs\") pod \"nova-api-0\" (UID: \"4268c17c-3adb-4912-ae27-bc528e97a6c7\") " pod="openstack/nova-api-0" Nov 22 10:59:51 crc kubenswrapper[4926]: I1122 10:59:51.649655 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4268c17c-3adb-4912-ae27-bc528e97a6c7-logs\") pod \"nova-api-0\" (UID: \"4268c17c-3adb-4912-ae27-bc528e97a6c7\") " pod="openstack/nova-api-0" Nov 22 10:59:51 crc kubenswrapper[4926]: I1122 10:59:51.650042 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4268c17c-3adb-4912-ae27-bc528e97a6c7-logs\") pod \"nova-api-0\" (UID: \"4268c17c-3adb-4912-ae27-bc528e97a6c7\") " pod="openstack/nova-api-0" Nov 22 10:59:51 crc kubenswrapper[4926]: I1122 10:59:51.650161 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/4268c17c-3adb-4912-ae27-bc528e97a6c7-public-tls-certs\") pod \"nova-api-0\" (UID: \"4268c17c-3adb-4912-ae27-bc528e97a6c7\") " pod="openstack/nova-api-0" Nov 22 10:59:51 crc kubenswrapper[4926]: I1122 10:59:51.650548 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/4268c17c-3adb-4912-ae27-bc528e97a6c7-internal-tls-certs\") pod \"nova-api-0\" (UID: \"4268c17c-3adb-4912-ae27-bc528e97a6c7\") " pod="openstack/nova-api-0" Nov 22 10:59:51 crc kubenswrapper[4926]: I1122 10:59:51.650704 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8k724\" (UniqueName: \"kubernetes.io/projected/4268c17c-3adb-4912-ae27-bc528e97a6c7-kube-api-access-8k724\") pod \"nova-api-0\" (UID: \"4268c17c-3adb-4912-ae27-bc528e97a6c7\") " pod="openstack/nova-api-0" Nov 22 10:59:51 crc kubenswrapper[4926]: I1122 10:59:51.650802 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4268c17c-3adb-4912-ae27-bc528e97a6c7-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"4268c17c-3adb-4912-ae27-bc528e97a6c7\") " pod="openstack/nova-api-0" Nov 22 10:59:51 crc kubenswrapper[4926]: I1122 10:59:51.650926 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4268c17c-3adb-4912-ae27-bc528e97a6c7-config-data\") pod \"nova-api-0\" (UID: \"4268c17c-3adb-4912-ae27-bc528e97a6c7\") " pod="openstack/nova-api-0" Nov 22 10:59:51 crc kubenswrapper[4926]: I1122 10:59:51.655746 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4268c17c-3adb-4912-ae27-bc528e97a6c7-config-data\") pod \"nova-api-0\" (UID: \"4268c17c-3adb-4912-ae27-bc528e97a6c7\") " pod="openstack/nova-api-0" Nov 22 10:59:51 crc kubenswrapper[4926]: I1122 10:59:51.656441 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/4268c17c-3adb-4912-ae27-bc528e97a6c7-public-tls-certs\") pod \"nova-api-0\" (UID: \"4268c17c-3adb-4912-ae27-bc528e97a6c7\") " 
pod="openstack/nova-api-0" Nov 22 10:59:51 crc kubenswrapper[4926]: I1122 10:59:51.657334 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/4268c17c-3adb-4912-ae27-bc528e97a6c7-internal-tls-certs\") pod \"nova-api-0\" (UID: \"4268c17c-3adb-4912-ae27-bc528e97a6c7\") " pod="openstack/nova-api-0" Nov 22 10:59:51 crc kubenswrapper[4926]: I1122 10:59:51.658441 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4268c17c-3adb-4912-ae27-bc528e97a6c7-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"4268c17c-3adb-4912-ae27-bc528e97a6c7\") " pod="openstack/nova-api-0" Nov 22 10:59:51 crc kubenswrapper[4926]: I1122 10:59:51.676862 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8k724\" (UniqueName: \"kubernetes.io/projected/4268c17c-3adb-4912-ae27-bc528e97a6c7-kube-api-access-8k724\") pod \"nova-api-0\" (UID: \"4268c17c-3adb-4912-ae27-bc528e97a6c7\") " pod="openstack/nova-api-0" Nov 22 10:59:51 crc kubenswrapper[4926]: I1122 10:59:51.801161 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 22 10:59:52 crc kubenswrapper[4926]: I1122 10:59:52.327749 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 22 10:59:52 crc kubenswrapper[4926]: I1122 10:59:52.414861 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"5e5f48cd-d405-4431-8ab1-de058f7c0f52","Type":"ContainerStarted","Data":"6b2bffa6f9dd408a1550e7d5b066a4b4df2cf1182dae9a25a1a9459d7a250821"} Nov 22 10:59:52 crc kubenswrapper[4926]: I1122 10:59:52.418381 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"4268c17c-3adb-4912-ae27-bc528e97a6c7","Type":"ContainerStarted","Data":"5175506249f2f96ac634f00c76efc55bfe4e39c4e7b3f310339cd388e5d96621"} Nov 22 10:59:52 crc kubenswrapper[4926]: I1122 10:59:52.599853 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c44886b1-1d10-4c9a-b13e-9e53d75e978b" path="/var/lib/kubelet/pods/c44886b1-1d10-4c9a-b13e-9e53d75e978b/volumes" Nov 22 10:59:52 crc kubenswrapper[4926]: I1122 10:59:52.965451 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-cell1-novncproxy-0" Nov 22 10:59:52 crc kubenswrapper[4926]: I1122 10:59:52.987302 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-cell1-novncproxy-0" Nov 22 10:59:53 crc kubenswrapper[4926]: I1122 10:59:53.428653 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"4268c17c-3adb-4912-ae27-bc528e97a6c7","Type":"ContainerStarted","Data":"b47f3e7fc264c7513377690ae5f6f6e677ec4c53e1b226ec013d61f3e3501bd5"} Nov 22 10:59:53 crc kubenswrapper[4926]: I1122 10:59:53.428970 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"4268c17c-3adb-4912-ae27-bc528e97a6c7","Type":"ContainerStarted","Data":"7d86310a46f973a0c9414df72f1a86e40a6a9a1859c3f5e226570ee90972274b"} Nov 22 10:59:53 crc kubenswrapper[4926]: I1122 10:59:53.430927 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"5e5f48cd-d405-4431-8ab1-de058f7c0f52","Type":"ContainerStarted","Data":"b6219f6aca88bd644d63f7b008d6d49ab4a62822d0e29b01aa7b30aa07551157"} Nov 22 10:59:53 crc kubenswrapper[4926]: I1122 
10:59:53.430960 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"5e5f48cd-d405-4431-8ab1-de058f7c0f52","Type":"ContainerStarted","Data":"ffdc919c32e8d96714385f4cbf2fd6b3aaba813f62a7420db4f008668a3cc190"} Nov 22 10:59:53 crc kubenswrapper[4926]: I1122 10:59:53.473788 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell1-novncproxy-0" Nov 22 10:59:53 crc kubenswrapper[4926]: I1122 10:59:53.488908 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=2.488876003 podStartE2EDuration="2.488876003s" podCreationTimestamp="2025-11-22 10:59:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 10:59:53.451155742 +0000 UTC m=+1213.752761029" watchObservedRunningTime="2025-11-22 10:59:53.488876003 +0000 UTC m=+1213.790481290" Nov 22 10:59:53 crc kubenswrapper[4926]: I1122 10:59:53.781562 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-cell-mapping-nkcgs"] Nov 22 10:59:53 crc kubenswrapper[4926]: I1122 10:59:53.783478 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-cell-mapping-nkcgs" Nov 22 10:59:53 crc kubenswrapper[4926]: I1122 10:59:53.795649 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-manage-config-data" Nov 22 10:59:53 crc kubenswrapper[4926]: I1122 10:59:53.795837 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-manage-scripts" Nov 22 10:59:53 crc kubenswrapper[4926]: I1122 10:59:53.826754 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-cell-mapping-nkcgs"] Nov 22 10:59:53 crc kubenswrapper[4926]: I1122 10:59:53.932107 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3d9f7681-83ae-40f4-b959-809724eb2498-combined-ca-bundle\") pod \"nova-cell1-cell-mapping-nkcgs\" (UID: \"3d9f7681-83ae-40f4-b959-809724eb2498\") " pod="openstack/nova-cell1-cell-mapping-nkcgs" Nov 22 10:59:53 crc kubenswrapper[4926]: I1122 10:59:53.932185 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3d9f7681-83ae-40f4-b959-809724eb2498-config-data\") pod \"nova-cell1-cell-mapping-nkcgs\" (UID: \"3d9f7681-83ae-40f4-b959-809724eb2498\") " pod="openstack/nova-cell1-cell-mapping-nkcgs" Nov 22 10:59:53 crc kubenswrapper[4926]: I1122 10:59:53.932217 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3d9f7681-83ae-40f4-b959-809724eb2498-scripts\") pod \"nova-cell1-cell-mapping-nkcgs\" (UID: \"3d9f7681-83ae-40f4-b959-809724eb2498\") " pod="openstack/nova-cell1-cell-mapping-nkcgs" Nov 22 10:59:53 crc kubenswrapper[4926]: I1122 10:59:53.932303 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sh6kr\" (UniqueName: \"kubernetes.io/projected/3d9f7681-83ae-40f4-b959-809724eb2498-kube-api-access-sh6kr\") pod \"nova-cell1-cell-mapping-nkcgs\" (UID: \"3d9f7681-83ae-40f4-b959-809724eb2498\") " pod="openstack/nova-cell1-cell-mapping-nkcgs" Nov 22 10:59:54 crc kubenswrapper[4926]: I1122 10:59:54.033941 4926 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3d9f7681-83ae-40f4-b959-809724eb2498-combined-ca-bundle\") pod \"nova-cell1-cell-mapping-nkcgs\" (UID: \"3d9f7681-83ae-40f4-b959-809724eb2498\") " pod="openstack/nova-cell1-cell-mapping-nkcgs" Nov 22 10:59:54 crc kubenswrapper[4926]: I1122 10:59:54.034031 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3d9f7681-83ae-40f4-b959-809724eb2498-config-data\") pod \"nova-cell1-cell-mapping-nkcgs\" (UID: \"3d9f7681-83ae-40f4-b959-809724eb2498\") " pod="openstack/nova-cell1-cell-mapping-nkcgs" Nov 22 10:59:54 crc kubenswrapper[4926]: I1122 10:59:54.034068 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3d9f7681-83ae-40f4-b959-809724eb2498-scripts\") pod \"nova-cell1-cell-mapping-nkcgs\" (UID: \"3d9f7681-83ae-40f4-b959-809724eb2498\") " pod="openstack/nova-cell1-cell-mapping-nkcgs" Nov 22 10:59:54 crc kubenswrapper[4926]: I1122 10:59:54.034138 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sh6kr\" (UniqueName: \"kubernetes.io/projected/3d9f7681-83ae-40f4-b959-809724eb2498-kube-api-access-sh6kr\") pod \"nova-cell1-cell-mapping-nkcgs\" (UID: \"3d9f7681-83ae-40f4-b959-809724eb2498\") " pod="openstack/nova-cell1-cell-mapping-nkcgs" Nov 22 10:59:54 crc kubenswrapper[4926]: I1122 10:59:54.041049 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3d9f7681-83ae-40f4-b959-809724eb2498-combined-ca-bundle\") pod \"nova-cell1-cell-mapping-nkcgs\" (UID: \"3d9f7681-83ae-40f4-b959-809724eb2498\") " pod="openstack/nova-cell1-cell-mapping-nkcgs" Nov 22 10:59:54 crc kubenswrapper[4926]: I1122 10:59:54.054538 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3d9f7681-83ae-40f4-b959-809724eb2498-scripts\") pod \"nova-cell1-cell-mapping-nkcgs\" (UID: \"3d9f7681-83ae-40f4-b959-809724eb2498\") " pod="openstack/nova-cell1-cell-mapping-nkcgs" Nov 22 10:59:54 crc kubenswrapper[4926]: I1122 10:59:54.056342 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sh6kr\" (UniqueName: \"kubernetes.io/projected/3d9f7681-83ae-40f4-b959-809724eb2498-kube-api-access-sh6kr\") pod \"nova-cell1-cell-mapping-nkcgs\" (UID: \"3d9f7681-83ae-40f4-b959-809724eb2498\") " pod="openstack/nova-cell1-cell-mapping-nkcgs" Nov 22 10:59:54 crc kubenswrapper[4926]: I1122 10:59:54.056431 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3d9f7681-83ae-40f4-b959-809724eb2498-config-data\") pod \"nova-cell1-cell-mapping-nkcgs\" (UID: \"3d9f7681-83ae-40f4-b959-809724eb2498\") " pod="openstack/nova-cell1-cell-mapping-nkcgs" Nov 22 10:59:54 crc kubenswrapper[4926]: I1122 10:59:54.113859 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-cell-mapping-nkcgs" Nov 22 10:59:54 crc kubenswrapper[4926]: I1122 10:59:54.612615 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-cell-mapping-nkcgs"] Nov 22 10:59:54 crc kubenswrapper[4926]: W1122 10:59:54.619413 4926 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod3d9f7681_83ae_40f4_b959_809724eb2498.slice/crio-8974a7ae8328f51ad6d36a3b13182cb0917640a85c0930d5267873c760dbc031 WatchSource:0}: Error finding container 8974a7ae8328f51ad6d36a3b13182cb0917640a85c0930d5267873c760dbc031: Status 404 returned error can't find the container with id 8974a7ae8328f51ad6d36a3b13182cb0917640a85c0930d5267873c760dbc031 Nov 22 10:59:54 crc kubenswrapper[4926]: I1122 10:59:54.822690 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-5c7b6c5df9-ptcmn" Nov 22 10:59:54 crc kubenswrapper[4926]: I1122 10:59:54.884380 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-865f5d856f-l7kzb"] Nov 22 10:59:54 crc kubenswrapper[4926]: I1122 10:59:54.885057 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-865f5d856f-l7kzb" podUID="c1058caf-33f4-4f00-bb1f-fe789d442b8d" containerName="dnsmasq-dns" containerID="cri-o://6b87219a2657302c8f7393f9f99a82078ce754798e7fafadb126d2f9454f7919" gracePeriod=10 Nov 22 10:59:55 crc kubenswrapper[4926]: I1122 10:59:55.452974 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"5e5f48cd-d405-4431-8ab1-de058f7c0f52","Type":"ContainerStarted","Data":"12dd8bace013db4080ec7995ad3868507017ae83572bb379995635c2ddfb0b78"} Nov 22 10:59:55 crc kubenswrapper[4926]: I1122 10:59:55.454422 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Nov 22 10:59:55 crc kubenswrapper[4926]: I1122 10:59:55.468378 4926 generic.go:334] "Generic (PLEG): container finished" podID="c1058caf-33f4-4f00-bb1f-fe789d442b8d" containerID="6b87219a2657302c8f7393f9f99a82078ce754798e7fafadb126d2f9454f7919" exitCode=0 Nov 22 10:59:55 crc kubenswrapper[4926]: I1122 10:59:55.468494 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-865f5d856f-l7kzb" event={"ID":"c1058caf-33f4-4f00-bb1f-fe789d442b8d","Type":"ContainerDied","Data":"6b87219a2657302c8f7393f9f99a82078ce754798e7fafadb126d2f9454f7919"} Nov 22 10:59:55 crc kubenswrapper[4926]: I1122 10:59:55.468538 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-865f5d856f-l7kzb" event={"ID":"c1058caf-33f4-4f00-bb1f-fe789d442b8d","Type":"ContainerDied","Data":"19e0620b5a4a31d9759740eb452cd7de858a893d544e9f69dcb49c0854fb2239"} Nov 22 10:59:55 crc kubenswrapper[4926]: I1122 10:59:55.468556 4926 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="19e0620b5a4a31d9759740eb452cd7de858a893d544e9f69dcb49c0854fb2239" Nov 22 10:59:55 crc kubenswrapper[4926]: I1122 10:59:55.475477 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-nkcgs" event={"ID":"3d9f7681-83ae-40f4-b959-809724eb2498","Type":"ContainerStarted","Data":"28fa4e89ea0a1b5044fbef33815c56031c037d853fe108da25c0e3a318c6d44b"} Nov 22 10:59:55 crc kubenswrapper[4926]: I1122 10:59:55.475550 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-nkcgs" 
event={"ID":"3d9f7681-83ae-40f4-b959-809724eb2498","Type":"ContainerStarted","Data":"8974a7ae8328f51ad6d36a3b13182cb0917640a85c0930d5267873c760dbc031"} Nov 22 10:59:55 crc kubenswrapper[4926]: I1122 10:59:55.503748 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.060925127 podStartE2EDuration="5.503729014s" podCreationTimestamp="2025-11-22 10:59:50 +0000 UTC" firstStartedPulling="2025-11-22 10:59:51.253527871 +0000 UTC m=+1211.555133158" lastFinishedPulling="2025-11-22 10:59:54.696331758 +0000 UTC m=+1214.997937045" observedRunningTime="2025-11-22 10:59:55.500419309 +0000 UTC m=+1215.802024606" watchObservedRunningTime="2025-11-22 10:59:55.503729014 +0000 UTC m=+1215.805334301" Nov 22 10:59:55 crc kubenswrapper[4926]: I1122 10:59:55.538632 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-865f5d856f-l7kzb" Nov 22 10:59:55 crc kubenswrapper[4926]: I1122 10:59:55.594369 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-cell-mapping-nkcgs" podStartSLOduration=2.594348101 podStartE2EDuration="2.594348101s" podCreationTimestamp="2025-11-22 10:59:53 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 10:59:55.544305047 +0000 UTC m=+1215.845910344" watchObservedRunningTime="2025-11-22 10:59:55.594348101 +0000 UTC m=+1215.895953388" Nov 22 10:59:55 crc kubenswrapper[4926]: I1122 10:59:55.673417 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c1058caf-33f4-4f00-bb1f-fe789d442b8d-config\") pod \"c1058caf-33f4-4f00-bb1f-fe789d442b8d\" (UID: \"c1058caf-33f4-4f00-bb1f-fe789d442b8d\") " Nov 22 10:59:55 crc kubenswrapper[4926]: I1122 10:59:55.673872 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/c1058caf-33f4-4f00-bb1f-fe789d442b8d-dns-swift-storage-0\") pod \"c1058caf-33f4-4f00-bb1f-fe789d442b8d\" (UID: \"c1058caf-33f4-4f00-bb1f-fe789d442b8d\") " Nov 22 10:59:55 crc kubenswrapper[4926]: I1122 10:59:55.673955 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/c1058caf-33f4-4f00-bb1f-fe789d442b8d-dns-svc\") pod \"c1058caf-33f4-4f00-bb1f-fe789d442b8d\" (UID: \"c1058caf-33f4-4f00-bb1f-fe789d442b8d\") " Nov 22 10:59:55 crc kubenswrapper[4926]: I1122 10:59:55.673973 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-g4pqd\" (UniqueName: \"kubernetes.io/projected/c1058caf-33f4-4f00-bb1f-fe789d442b8d-kube-api-access-g4pqd\") pod \"c1058caf-33f4-4f00-bb1f-fe789d442b8d\" (UID: \"c1058caf-33f4-4f00-bb1f-fe789d442b8d\") " Nov 22 10:59:55 crc kubenswrapper[4926]: I1122 10:59:55.674019 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/c1058caf-33f4-4f00-bb1f-fe789d442b8d-ovsdbserver-sb\") pod \"c1058caf-33f4-4f00-bb1f-fe789d442b8d\" (UID: \"c1058caf-33f4-4f00-bb1f-fe789d442b8d\") " Nov 22 10:59:55 crc kubenswrapper[4926]: I1122 10:59:55.674059 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/c1058caf-33f4-4f00-bb1f-fe789d442b8d-ovsdbserver-nb\") pod 
\"c1058caf-33f4-4f00-bb1f-fe789d442b8d\" (UID: \"c1058caf-33f4-4f00-bb1f-fe789d442b8d\") " Nov 22 10:59:55 crc kubenswrapper[4926]: I1122 10:59:55.688832 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c1058caf-33f4-4f00-bb1f-fe789d442b8d-kube-api-access-g4pqd" (OuterVolumeSpecName: "kube-api-access-g4pqd") pod "c1058caf-33f4-4f00-bb1f-fe789d442b8d" (UID: "c1058caf-33f4-4f00-bb1f-fe789d442b8d"). InnerVolumeSpecName "kube-api-access-g4pqd". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:59:55 crc kubenswrapper[4926]: I1122 10:59:55.760001 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c1058caf-33f4-4f00-bb1f-fe789d442b8d-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "c1058caf-33f4-4f00-bb1f-fe789d442b8d" (UID: "c1058caf-33f4-4f00-bb1f-fe789d442b8d"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:59:55 crc kubenswrapper[4926]: I1122 10:59:55.776743 4926 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/c1058caf-33f4-4f00-bb1f-fe789d442b8d-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 22 10:59:55 crc kubenswrapper[4926]: I1122 10:59:55.776787 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-g4pqd\" (UniqueName: \"kubernetes.io/projected/c1058caf-33f4-4f00-bb1f-fe789d442b8d-kube-api-access-g4pqd\") on node \"crc\" DevicePath \"\"" Nov 22 10:59:55 crc kubenswrapper[4926]: I1122 10:59:55.782944 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c1058caf-33f4-4f00-bb1f-fe789d442b8d-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "c1058caf-33f4-4f00-bb1f-fe789d442b8d" (UID: "c1058caf-33f4-4f00-bb1f-fe789d442b8d"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:59:55 crc kubenswrapper[4926]: I1122 10:59:55.783596 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c1058caf-33f4-4f00-bb1f-fe789d442b8d-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "c1058caf-33f4-4f00-bb1f-fe789d442b8d" (UID: "c1058caf-33f4-4f00-bb1f-fe789d442b8d"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:59:55 crc kubenswrapper[4926]: I1122 10:59:55.795415 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c1058caf-33f4-4f00-bb1f-fe789d442b8d-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "c1058caf-33f4-4f00-bb1f-fe789d442b8d" (UID: "c1058caf-33f4-4f00-bb1f-fe789d442b8d"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:59:55 crc kubenswrapper[4926]: I1122 10:59:55.804209 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c1058caf-33f4-4f00-bb1f-fe789d442b8d-config" (OuterVolumeSpecName: "config") pod "c1058caf-33f4-4f00-bb1f-fe789d442b8d" (UID: "c1058caf-33f4-4f00-bb1f-fe789d442b8d"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:59:55 crc kubenswrapper[4926]: I1122 10:59:55.879560 4926 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/c1058caf-33f4-4f00-bb1f-fe789d442b8d-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Nov 22 10:59:55 crc kubenswrapper[4926]: I1122 10:59:55.879604 4926 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/c1058caf-33f4-4f00-bb1f-fe789d442b8d-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 22 10:59:55 crc kubenswrapper[4926]: I1122 10:59:55.879619 4926 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/c1058caf-33f4-4f00-bb1f-fe789d442b8d-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 22 10:59:55 crc kubenswrapper[4926]: I1122 10:59:55.879628 4926 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c1058caf-33f4-4f00-bb1f-fe789d442b8d-config\") on node \"crc\" DevicePath \"\"" Nov 22 10:59:56 crc kubenswrapper[4926]: I1122 10:59:56.489201 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-865f5d856f-l7kzb" Nov 22 10:59:56 crc kubenswrapper[4926]: I1122 10:59:56.524953 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-865f5d856f-l7kzb"] Nov 22 10:59:56 crc kubenswrapper[4926]: I1122 10:59:56.538665 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-865f5d856f-l7kzb"] Nov 22 10:59:56 crc kubenswrapper[4926]: I1122 10:59:56.592894 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c1058caf-33f4-4f00-bb1f-fe789d442b8d" path="/var/lib/kubelet/pods/c1058caf-33f4-4f00-bb1f-fe789d442b8d/volumes" Nov 22 11:00:00 crc kubenswrapper[4926]: I1122 11:00:00.156880 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29396820-t4glk"] Nov 22 11:00:00 crc kubenswrapper[4926]: E1122 11:00:00.157881 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c1058caf-33f4-4f00-bb1f-fe789d442b8d" containerName="init" Nov 22 11:00:00 crc kubenswrapper[4926]: I1122 11:00:00.157953 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="c1058caf-33f4-4f00-bb1f-fe789d442b8d" containerName="init" Nov 22 11:00:00 crc kubenswrapper[4926]: E1122 11:00:00.157975 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c1058caf-33f4-4f00-bb1f-fe789d442b8d" containerName="dnsmasq-dns" Nov 22 11:00:00 crc kubenswrapper[4926]: I1122 11:00:00.157983 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="c1058caf-33f4-4f00-bb1f-fe789d442b8d" containerName="dnsmasq-dns" Nov 22 11:00:00 crc kubenswrapper[4926]: I1122 11:00:00.158232 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="c1058caf-33f4-4f00-bb1f-fe789d442b8d" containerName="dnsmasq-dns" Nov 22 11:00:00 crc kubenswrapper[4926]: I1122 11:00:00.160609 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29396820-t4glk" Nov 22 11:00:00 crc kubenswrapper[4926]: I1122 11:00:00.162613 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 22 11:00:00 crc kubenswrapper[4926]: I1122 11:00:00.163417 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 22 11:00:00 crc kubenswrapper[4926]: I1122 11:00:00.173546 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29396820-t4glk"] Nov 22 11:00:00 crc kubenswrapper[4926]: I1122 11:00:00.276941 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/7ecb12c0-9db0-426f-9160-214011fc3f9c-config-volume\") pod \"collect-profiles-29396820-t4glk\" (UID: \"7ecb12c0-9db0-426f-9160-214011fc3f9c\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29396820-t4glk" Nov 22 11:00:00 crc kubenswrapper[4926]: I1122 11:00:00.277266 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-br7bw\" (UniqueName: \"kubernetes.io/projected/7ecb12c0-9db0-426f-9160-214011fc3f9c-kube-api-access-br7bw\") pod \"collect-profiles-29396820-t4glk\" (UID: \"7ecb12c0-9db0-426f-9160-214011fc3f9c\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29396820-t4glk" Nov 22 11:00:00 crc kubenswrapper[4926]: I1122 11:00:00.277479 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/7ecb12c0-9db0-426f-9160-214011fc3f9c-secret-volume\") pod \"collect-profiles-29396820-t4glk\" (UID: \"7ecb12c0-9db0-426f-9160-214011fc3f9c\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29396820-t4glk" Nov 22 11:00:00 crc kubenswrapper[4926]: I1122 11:00:00.379042 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/7ecb12c0-9db0-426f-9160-214011fc3f9c-config-volume\") pod \"collect-profiles-29396820-t4glk\" (UID: \"7ecb12c0-9db0-426f-9160-214011fc3f9c\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29396820-t4glk" Nov 22 11:00:00 crc kubenswrapper[4926]: I1122 11:00:00.379194 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-br7bw\" (UniqueName: \"kubernetes.io/projected/7ecb12c0-9db0-426f-9160-214011fc3f9c-kube-api-access-br7bw\") pod \"collect-profiles-29396820-t4glk\" (UID: \"7ecb12c0-9db0-426f-9160-214011fc3f9c\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29396820-t4glk" Nov 22 11:00:00 crc kubenswrapper[4926]: I1122 11:00:00.379265 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/7ecb12c0-9db0-426f-9160-214011fc3f9c-secret-volume\") pod \"collect-profiles-29396820-t4glk\" (UID: \"7ecb12c0-9db0-426f-9160-214011fc3f9c\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29396820-t4glk" Nov 22 11:00:00 crc kubenswrapper[4926]: I1122 11:00:00.380092 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/7ecb12c0-9db0-426f-9160-214011fc3f9c-config-volume\") pod 
\"collect-profiles-29396820-t4glk\" (UID: \"7ecb12c0-9db0-426f-9160-214011fc3f9c\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29396820-t4glk" Nov 22 11:00:00 crc kubenswrapper[4926]: I1122 11:00:00.385803 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/7ecb12c0-9db0-426f-9160-214011fc3f9c-secret-volume\") pod \"collect-profiles-29396820-t4glk\" (UID: \"7ecb12c0-9db0-426f-9160-214011fc3f9c\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29396820-t4glk" Nov 22 11:00:00 crc kubenswrapper[4926]: I1122 11:00:00.401575 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-br7bw\" (UniqueName: \"kubernetes.io/projected/7ecb12c0-9db0-426f-9160-214011fc3f9c-kube-api-access-br7bw\") pod \"collect-profiles-29396820-t4glk\" (UID: \"7ecb12c0-9db0-426f-9160-214011fc3f9c\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29396820-t4glk" Nov 22 11:00:00 crc kubenswrapper[4926]: I1122 11:00:00.482195 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29396820-t4glk" Nov 22 11:00:00 crc kubenswrapper[4926]: I1122 11:00:00.527091 4926 generic.go:334] "Generic (PLEG): container finished" podID="3d9f7681-83ae-40f4-b959-809724eb2498" containerID="28fa4e89ea0a1b5044fbef33815c56031c037d853fe108da25c0e3a318c6d44b" exitCode=0 Nov 22 11:00:00 crc kubenswrapper[4926]: I1122 11:00:00.527148 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-nkcgs" event={"ID":"3d9f7681-83ae-40f4-b959-809724eb2498","Type":"ContainerDied","Data":"28fa4e89ea0a1b5044fbef33815c56031c037d853fe108da25c0e3a318c6d44b"} Nov 22 11:00:00 crc kubenswrapper[4926]: I1122 11:00:00.959471 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29396820-t4glk"] Nov 22 11:00:01 crc kubenswrapper[4926]: I1122 11:00:01.537497 4926 generic.go:334] "Generic (PLEG): container finished" podID="7ecb12c0-9db0-426f-9160-214011fc3f9c" containerID="de95b4e1ddd886888bca8b19ea58c41b74c64617926a49c331fe90294b3078c9" exitCode=0 Nov 22 11:00:01 crc kubenswrapper[4926]: I1122 11:00:01.537676 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29396820-t4glk" event={"ID":"7ecb12c0-9db0-426f-9160-214011fc3f9c","Type":"ContainerDied","Data":"de95b4e1ddd886888bca8b19ea58c41b74c64617926a49c331fe90294b3078c9"} Nov 22 11:00:01 crc kubenswrapper[4926]: I1122 11:00:01.537702 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29396820-t4glk" event={"ID":"7ecb12c0-9db0-426f-9160-214011fc3f9c","Type":"ContainerStarted","Data":"809575c5775813b0724264fe1c715e8d4b85415bd385bde1e331f9d70b1e1258"} Nov 22 11:00:01 crc kubenswrapper[4926]: I1122 11:00:01.801454 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Nov 22 11:00:01 crc kubenswrapper[4926]: I1122 11:00:01.801518 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Nov 22 11:00:01 crc kubenswrapper[4926]: I1122 11:00:01.887817 4926 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-cell-mapping-nkcgs" Nov 22 11:00:02 crc kubenswrapper[4926]: I1122 11:00:02.011214 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3d9f7681-83ae-40f4-b959-809724eb2498-combined-ca-bundle\") pod \"3d9f7681-83ae-40f4-b959-809724eb2498\" (UID: \"3d9f7681-83ae-40f4-b959-809724eb2498\") " Nov 22 11:00:02 crc kubenswrapper[4926]: I1122 11:00:02.011635 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3d9f7681-83ae-40f4-b959-809724eb2498-config-data\") pod \"3d9f7681-83ae-40f4-b959-809724eb2498\" (UID: \"3d9f7681-83ae-40f4-b959-809724eb2498\") " Nov 22 11:00:02 crc kubenswrapper[4926]: I1122 11:00:02.011757 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sh6kr\" (UniqueName: \"kubernetes.io/projected/3d9f7681-83ae-40f4-b959-809724eb2498-kube-api-access-sh6kr\") pod \"3d9f7681-83ae-40f4-b959-809724eb2498\" (UID: \"3d9f7681-83ae-40f4-b959-809724eb2498\") " Nov 22 11:00:02 crc kubenswrapper[4926]: I1122 11:00:02.012274 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3d9f7681-83ae-40f4-b959-809724eb2498-scripts\") pod \"3d9f7681-83ae-40f4-b959-809724eb2498\" (UID: \"3d9f7681-83ae-40f4-b959-809724eb2498\") " Nov 22 11:00:02 crc kubenswrapper[4926]: I1122 11:00:02.017177 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3d9f7681-83ae-40f4-b959-809724eb2498-kube-api-access-sh6kr" (OuterVolumeSpecName: "kube-api-access-sh6kr") pod "3d9f7681-83ae-40f4-b959-809724eb2498" (UID: "3d9f7681-83ae-40f4-b959-809724eb2498"). InnerVolumeSpecName "kube-api-access-sh6kr". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 11:00:02 crc kubenswrapper[4926]: I1122 11:00:02.017604 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3d9f7681-83ae-40f4-b959-809724eb2498-scripts" (OuterVolumeSpecName: "scripts") pod "3d9f7681-83ae-40f4-b959-809724eb2498" (UID: "3d9f7681-83ae-40f4-b959-809724eb2498"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 11:00:02 crc kubenswrapper[4926]: I1122 11:00:02.038838 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3d9f7681-83ae-40f4-b959-809724eb2498-config-data" (OuterVolumeSpecName: "config-data") pod "3d9f7681-83ae-40f4-b959-809724eb2498" (UID: "3d9f7681-83ae-40f4-b959-809724eb2498"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 11:00:02 crc kubenswrapper[4926]: I1122 11:00:02.044016 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3d9f7681-83ae-40f4-b959-809724eb2498-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "3d9f7681-83ae-40f4-b959-809724eb2498" (UID: "3d9f7681-83ae-40f4-b959-809724eb2498"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 11:00:02 crc kubenswrapper[4926]: I1122 11:00:02.115916 4926 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3d9f7681-83ae-40f4-b959-809724eb2498-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 22 11:00:02 crc kubenswrapper[4926]: I1122 11:00:02.116208 4926 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3d9f7681-83ae-40f4-b959-809724eb2498-config-data\") on node \"crc\" DevicePath \"\"" Nov 22 11:00:02 crc kubenswrapper[4926]: I1122 11:00:02.116328 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sh6kr\" (UniqueName: \"kubernetes.io/projected/3d9f7681-83ae-40f4-b959-809724eb2498-kube-api-access-sh6kr\") on node \"crc\" DevicePath \"\"" Nov 22 11:00:02 crc kubenswrapper[4926]: I1122 11:00:02.116445 4926 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3d9f7681-83ae-40f4-b959-809724eb2498-scripts\") on node \"crc\" DevicePath \"\"" Nov 22 11:00:02 crc kubenswrapper[4926]: I1122 11:00:02.551130 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-cell-mapping-nkcgs" Nov 22 11:00:02 crc kubenswrapper[4926]: I1122 11:00:02.554482 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-nkcgs" event={"ID":"3d9f7681-83ae-40f4-b959-809724eb2498","Type":"ContainerDied","Data":"8974a7ae8328f51ad6d36a3b13182cb0917640a85c0930d5267873c760dbc031"} Nov 22 11:00:02 crc kubenswrapper[4926]: I1122 11:00:02.554774 4926 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="8974a7ae8328f51ad6d36a3b13182cb0917640a85c0930d5267873c760dbc031" Nov 22 11:00:02 crc kubenswrapper[4926]: I1122 11:00:02.757115 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Nov 22 11:00:02 crc kubenswrapper[4926]: I1122 11:00:02.757422 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="4268c17c-3adb-4912-ae27-bc528e97a6c7" containerName="nova-api-log" containerID="cri-o://7d86310a46f973a0c9414df72f1a86e40a6a9a1859c3f5e226570ee90972274b" gracePeriod=30 Nov 22 11:00:02 crc kubenswrapper[4926]: I1122 11:00:02.758376 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="4268c17c-3adb-4912-ae27-bc528e97a6c7" containerName="nova-api-api" containerID="cri-o://b47f3e7fc264c7513377690ae5f6f6e677ec4c53e1b226ec013d61f3e3501bd5" gracePeriod=30 Nov 22 11:00:02 crc kubenswrapper[4926]: I1122 11:00:02.780128 4926 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="4268c17c-3adb-4912-ae27-bc528e97a6c7" containerName="nova-api-api" probeResult="failure" output="Get \"https://10.217.0.203:8774/\": EOF" Nov 22 11:00:02 crc kubenswrapper[4926]: I1122 11:00:02.780161 4926 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="4268c17c-3adb-4912-ae27-bc528e97a6c7" containerName="nova-api-log" probeResult="failure" output="Get \"https://10.217.0.203:8774/\": EOF" Nov 22 11:00:02 crc kubenswrapper[4926]: I1122 11:00:02.786264 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Nov 22 11:00:02 crc kubenswrapper[4926]: I1122 11:00:02.786467 4926 kuberuntime_container.go:808] "Killing container with a grace period" 
pod="openstack/nova-scheduler-0" podUID="f442b1e7-127c-4808-b6c0-57a419fc276b" containerName="nova-scheduler-scheduler" containerID="cri-o://7b69868baac4f2ea119bf1cb8ce58ab3d05ed831b82829d98dc2ebe6c044c393" gracePeriod=30 Nov 22 11:00:02 crc kubenswrapper[4926]: I1122 11:00:02.800770 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Nov 22 11:00:02 crc kubenswrapper[4926]: I1122 11:00:02.800996 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="69ff2b4a-0e22-4cb4-9f77-71753997ff1e" containerName="nova-metadata-log" containerID="cri-o://c931adbfb6ce5309935eaf16e5b39e8391d241760f7218d4d8a825ca5041554f" gracePeriod=30 Nov 22 11:00:02 crc kubenswrapper[4926]: I1122 11:00:02.801441 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="69ff2b4a-0e22-4cb4-9f77-71753997ff1e" containerName="nova-metadata-metadata" containerID="cri-o://78c90e0d760173b471381827911256c647ae31786b71ebceffe409c8a9ec819f" gracePeriod=30 Nov 22 11:00:02 crc kubenswrapper[4926]: I1122 11:00:02.983715 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29396820-t4glk" Nov 22 11:00:03 crc kubenswrapper[4926]: I1122 11:00:03.138576 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/7ecb12c0-9db0-426f-9160-214011fc3f9c-config-volume\") pod \"7ecb12c0-9db0-426f-9160-214011fc3f9c\" (UID: \"7ecb12c0-9db0-426f-9160-214011fc3f9c\") " Nov 22 11:00:03 crc kubenswrapper[4926]: I1122 11:00:03.138618 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/7ecb12c0-9db0-426f-9160-214011fc3f9c-secret-volume\") pod \"7ecb12c0-9db0-426f-9160-214011fc3f9c\" (UID: \"7ecb12c0-9db0-426f-9160-214011fc3f9c\") " Nov 22 11:00:03 crc kubenswrapper[4926]: I1122 11:00:03.138667 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-br7bw\" (UniqueName: \"kubernetes.io/projected/7ecb12c0-9db0-426f-9160-214011fc3f9c-kube-api-access-br7bw\") pod \"7ecb12c0-9db0-426f-9160-214011fc3f9c\" (UID: \"7ecb12c0-9db0-426f-9160-214011fc3f9c\") " Nov 22 11:00:03 crc kubenswrapper[4926]: I1122 11:00:03.139530 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7ecb12c0-9db0-426f-9160-214011fc3f9c-config-volume" (OuterVolumeSpecName: "config-volume") pod "7ecb12c0-9db0-426f-9160-214011fc3f9c" (UID: "7ecb12c0-9db0-426f-9160-214011fc3f9c"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 11:00:03 crc kubenswrapper[4926]: I1122 11:00:03.149817 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7ecb12c0-9db0-426f-9160-214011fc3f9c-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "7ecb12c0-9db0-426f-9160-214011fc3f9c" (UID: "7ecb12c0-9db0-426f-9160-214011fc3f9c"). InnerVolumeSpecName "secret-volume". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 11:00:03 crc kubenswrapper[4926]: I1122 11:00:03.149809 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7ecb12c0-9db0-426f-9160-214011fc3f9c-kube-api-access-br7bw" (OuterVolumeSpecName: "kube-api-access-br7bw") pod "7ecb12c0-9db0-426f-9160-214011fc3f9c" (UID: "7ecb12c0-9db0-426f-9160-214011fc3f9c"). InnerVolumeSpecName "kube-api-access-br7bw". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 11:00:03 crc kubenswrapper[4926]: I1122 11:00:03.241098 4926 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/7ecb12c0-9db0-426f-9160-214011fc3f9c-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 22 11:00:03 crc kubenswrapper[4926]: I1122 11:00:03.241145 4926 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/7ecb12c0-9db0-426f-9160-214011fc3f9c-config-volume\") on node \"crc\" DevicePath \"\"" Nov 22 11:00:03 crc kubenswrapper[4926]: I1122 11:00:03.241159 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-br7bw\" (UniqueName: \"kubernetes.io/projected/7ecb12c0-9db0-426f-9160-214011fc3f9c-kube-api-access-br7bw\") on node \"crc\" DevicePath \"\"" Nov 22 11:00:03 crc kubenswrapper[4926]: I1122 11:00:03.562973 4926 generic.go:334] "Generic (PLEG): container finished" podID="4268c17c-3adb-4912-ae27-bc528e97a6c7" containerID="7d86310a46f973a0c9414df72f1a86e40a6a9a1859c3f5e226570ee90972274b" exitCode=143 Nov 22 11:00:03 crc kubenswrapper[4926]: I1122 11:00:03.563023 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"4268c17c-3adb-4912-ae27-bc528e97a6c7","Type":"ContainerDied","Data":"7d86310a46f973a0c9414df72f1a86e40a6a9a1859c3f5e226570ee90972274b"} Nov 22 11:00:03 crc kubenswrapper[4926]: I1122 11:00:03.564932 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29396820-t4glk" event={"ID":"7ecb12c0-9db0-426f-9160-214011fc3f9c","Type":"ContainerDied","Data":"809575c5775813b0724264fe1c715e8d4b85415bd385bde1e331f9d70b1e1258"} Nov 22 11:00:03 crc kubenswrapper[4926]: I1122 11:00:03.564967 4926 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="809575c5775813b0724264fe1c715e8d4b85415bd385bde1e331f9d70b1e1258" Nov 22 11:00:03 crc kubenswrapper[4926]: I1122 11:00:03.564968 4926 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29396820-t4glk" Nov 22 11:00:03 crc kubenswrapper[4926]: I1122 11:00:03.567459 4926 generic.go:334] "Generic (PLEG): container finished" podID="69ff2b4a-0e22-4cb4-9f77-71753997ff1e" containerID="c931adbfb6ce5309935eaf16e5b39e8391d241760f7218d4d8a825ca5041554f" exitCode=143 Nov 22 11:00:03 crc kubenswrapper[4926]: I1122 11:00:03.567510 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"69ff2b4a-0e22-4cb4-9f77-71753997ff1e","Type":"ContainerDied","Data":"c931adbfb6ce5309935eaf16e5b39e8391d241760f7218d4d8a825ca5041554f"} Nov 22 11:00:03 crc kubenswrapper[4926]: E1122 11:00:03.814307 4926 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="7b69868baac4f2ea119bf1cb8ce58ab3d05ed831b82829d98dc2ebe6c044c393" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Nov 22 11:00:03 crc kubenswrapper[4926]: E1122 11:00:03.816627 4926 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="7b69868baac4f2ea119bf1cb8ce58ab3d05ed831b82829d98dc2ebe6c044c393" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Nov 22 11:00:03 crc kubenswrapper[4926]: E1122 11:00:03.817804 4926 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="7b69868baac4f2ea119bf1cb8ce58ab3d05ed831b82829d98dc2ebe6c044c393" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Nov 22 11:00:03 crc kubenswrapper[4926]: E1122 11:00:03.817839 4926 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/nova-scheduler-0" podUID="f442b1e7-127c-4808-b6c0-57a419fc276b" containerName="nova-scheduler-scheduler" Nov 22 11:00:05 crc kubenswrapper[4926]: I1122 11:00:05.962780 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/nova-metadata-0" podUID="69ff2b4a-0e22-4cb4-9f77-71753997ff1e" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"https://10.217.0.195:8775/\": read tcp 10.217.0.2:60526->10.217.0.195:8775: read: connection reset by peer" Nov 22 11:00:05 crc kubenswrapper[4926]: I1122 11:00:05.962785 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/nova-metadata-0" podUID="69ff2b4a-0e22-4cb4-9f77-71753997ff1e" containerName="nova-metadata-log" probeResult="failure" output="Get \"https://10.217.0.195:8775/\": read tcp 10.217.0.2:60536->10.217.0.195:8775: read: connection reset by peer" Nov 22 11:00:06 crc kubenswrapper[4926]: I1122 11:00:06.606272 4926 generic.go:334] "Generic (PLEG): container finished" podID="69ff2b4a-0e22-4cb4-9f77-71753997ff1e" containerID="78c90e0d760173b471381827911256c647ae31786b71ebceffe409c8a9ec819f" exitCode=0 Nov 22 11:00:06 crc kubenswrapper[4926]: I1122 11:00:06.606313 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"69ff2b4a-0e22-4cb4-9f77-71753997ff1e","Type":"ContainerDied","Data":"78c90e0d760173b471381827911256c647ae31786b71ebceffe409c8a9ec819f"} Nov 22 11:00:06 crc 
kubenswrapper[4926]: I1122 11:00:06.903854 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 22 11:00:07 crc kubenswrapper[4926]: I1122 11:00:07.012851 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/69ff2b4a-0e22-4cb4-9f77-71753997ff1e-combined-ca-bundle\") pod \"69ff2b4a-0e22-4cb4-9f77-71753997ff1e\" (UID: \"69ff2b4a-0e22-4cb4-9f77-71753997ff1e\") " Nov 22 11:00:07 crc kubenswrapper[4926]: I1122 11:00:07.013265 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/69ff2b4a-0e22-4cb4-9f77-71753997ff1e-logs\") pod \"69ff2b4a-0e22-4cb4-9f77-71753997ff1e\" (UID: \"69ff2b4a-0e22-4cb4-9f77-71753997ff1e\") " Nov 22 11:00:07 crc kubenswrapper[4926]: I1122 11:00:07.013307 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/69ff2b4a-0e22-4cb4-9f77-71753997ff1e-nova-metadata-tls-certs\") pod \"69ff2b4a-0e22-4cb4-9f77-71753997ff1e\" (UID: \"69ff2b4a-0e22-4cb4-9f77-71753997ff1e\") " Nov 22 11:00:07 crc kubenswrapper[4926]: I1122 11:00:07.013429 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2nld7\" (UniqueName: \"kubernetes.io/projected/69ff2b4a-0e22-4cb4-9f77-71753997ff1e-kube-api-access-2nld7\") pod \"69ff2b4a-0e22-4cb4-9f77-71753997ff1e\" (UID: \"69ff2b4a-0e22-4cb4-9f77-71753997ff1e\") " Nov 22 11:00:07 crc kubenswrapper[4926]: I1122 11:00:07.013483 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/69ff2b4a-0e22-4cb4-9f77-71753997ff1e-config-data\") pod \"69ff2b4a-0e22-4cb4-9f77-71753997ff1e\" (UID: \"69ff2b4a-0e22-4cb4-9f77-71753997ff1e\") " Nov 22 11:00:07 crc kubenswrapper[4926]: I1122 11:00:07.013968 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/69ff2b4a-0e22-4cb4-9f77-71753997ff1e-logs" (OuterVolumeSpecName: "logs") pod "69ff2b4a-0e22-4cb4-9f77-71753997ff1e" (UID: "69ff2b4a-0e22-4cb4-9f77-71753997ff1e"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 11:00:07 crc kubenswrapper[4926]: I1122 11:00:07.031550 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/69ff2b4a-0e22-4cb4-9f77-71753997ff1e-kube-api-access-2nld7" (OuterVolumeSpecName: "kube-api-access-2nld7") pod "69ff2b4a-0e22-4cb4-9f77-71753997ff1e" (UID: "69ff2b4a-0e22-4cb4-9f77-71753997ff1e"). InnerVolumeSpecName "kube-api-access-2nld7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 11:00:07 crc kubenswrapper[4926]: I1122 11:00:07.046283 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/69ff2b4a-0e22-4cb4-9f77-71753997ff1e-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "69ff2b4a-0e22-4cb4-9f77-71753997ff1e" (UID: "69ff2b4a-0e22-4cb4-9f77-71753997ff1e"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 11:00:07 crc kubenswrapper[4926]: I1122 11:00:07.055388 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/69ff2b4a-0e22-4cb4-9f77-71753997ff1e-config-data" (OuterVolumeSpecName: "config-data") pod "69ff2b4a-0e22-4cb4-9f77-71753997ff1e" (UID: "69ff2b4a-0e22-4cb4-9f77-71753997ff1e"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 11:00:07 crc kubenswrapper[4926]: I1122 11:00:07.084372 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/69ff2b4a-0e22-4cb4-9f77-71753997ff1e-nova-metadata-tls-certs" (OuterVolumeSpecName: "nova-metadata-tls-certs") pod "69ff2b4a-0e22-4cb4-9f77-71753997ff1e" (UID: "69ff2b4a-0e22-4cb4-9f77-71753997ff1e"). InnerVolumeSpecName "nova-metadata-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 11:00:07 crc kubenswrapper[4926]: I1122 11:00:07.115603 4926 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/69ff2b4a-0e22-4cb4-9f77-71753997ff1e-config-data\") on node \"crc\" DevicePath \"\"" Nov 22 11:00:07 crc kubenswrapper[4926]: I1122 11:00:07.115637 4926 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/69ff2b4a-0e22-4cb4-9f77-71753997ff1e-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 22 11:00:07 crc kubenswrapper[4926]: I1122 11:00:07.115651 4926 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/69ff2b4a-0e22-4cb4-9f77-71753997ff1e-logs\") on node \"crc\" DevicePath \"\"" Nov 22 11:00:07 crc kubenswrapper[4926]: I1122 11:00:07.115661 4926 reconciler_common.go:293] "Volume detached for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/69ff2b4a-0e22-4cb4-9f77-71753997ff1e-nova-metadata-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 22 11:00:07 crc kubenswrapper[4926]: I1122 11:00:07.115672 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2nld7\" (UniqueName: \"kubernetes.io/projected/69ff2b4a-0e22-4cb4-9f77-71753997ff1e-kube-api-access-2nld7\") on node \"crc\" DevicePath \"\"" Nov 22 11:00:07 crc kubenswrapper[4926]: I1122 11:00:07.617260 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"69ff2b4a-0e22-4cb4-9f77-71753997ff1e","Type":"ContainerDied","Data":"c45e7a5d243289054a3e1a2022811b7b2af3fecc2636ad6902963d52bbbc4b23"} Nov 22 11:00:07 crc kubenswrapper[4926]: I1122 11:00:07.617573 4926 scope.go:117] "RemoveContainer" containerID="78c90e0d760173b471381827911256c647ae31786b71ebceffe409c8a9ec819f" Nov 22 11:00:07 crc kubenswrapper[4926]: I1122 11:00:07.617363 4926 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Nov 22 11:00:07 crc kubenswrapper[4926]: I1122 11:00:07.664871 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Nov 22 11:00:07 crc kubenswrapper[4926]: I1122 11:00:07.679705 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-metadata-0"] Nov 22 11:00:07 crc kubenswrapper[4926]: I1122 11:00:07.682068 4926 scope.go:117] "RemoveContainer" containerID="c931adbfb6ce5309935eaf16e5b39e8391d241760f7218d4d8a825ca5041554f" Nov 22 11:00:07 crc kubenswrapper[4926]: I1122 11:00:07.705778 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Nov 22 11:00:07 crc kubenswrapper[4926]: E1122 11:00:07.706258 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="69ff2b4a-0e22-4cb4-9f77-71753997ff1e" containerName="nova-metadata-log" Nov 22 11:00:07 crc kubenswrapper[4926]: I1122 11:00:07.706280 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="69ff2b4a-0e22-4cb4-9f77-71753997ff1e" containerName="nova-metadata-log" Nov 22 11:00:07 crc kubenswrapper[4926]: E1122 11:00:07.706311 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7ecb12c0-9db0-426f-9160-214011fc3f9c" containerName="collect-profiles" Nov 22 11:00:07 crc kubenswrapper[4926]: I1122 11:00:07.706321 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="7ecb12c0-9db0-426f-9160-214011fc3f9c" containerName="collect-profiles" Nov 22 11:00:07 crc kubenswrapper[4926]: E1122 11:00:07.706334 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3d9f7681-83ae-40f4-b959-809724eb2498" containerName="nova-manage" Nov 22 11:00:07 crc kubenswrapper[4926]: I1122 11:00:07.706342 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="3d9f7681-83ae-40f4-b959-809724eb2498" containerName="nova-manage" Nov 22 11:00:07 crc kubenswrapper[4926]: E1122 11:00:07.706357 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="69ff2b4a-0e22-4cb4-9f77-71753997ff1e" containerName="nova-metadata-metadata" Nov 22 11:00:07 crc kubenswrapper[4926]: I1122 11:00:07.706366 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="69ff2b4a-0e22-4cb4-9f77-71753997ff1e" containerName="nova-metadata-metadata" Nov 22 11:00:07 crc kubenswrapper[4926]: I1122 11:00:07.706659 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="3d9f7681-83ae-40f4-b959-809724eb2498" containerName="nova-manage" Nov 22 11:00:07 crc kubenswrapper[4926]: I1122 11:00:07.706707 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="69ff2b4a-0e22-4cb4-9f77-71753997ff1e" containerName="nova-metadata-metadata" Nov 22 11:00:07 crc kubenswrapper[4926]: I1122 11:00:07.706726 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="7ecb12c0-9db0-426f-9160-214011fc3f9c" containerName="collect-profiles" Nov 22 11:00:07 crc kubenswrapper[4926]: I1122 11:00:07.706740 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="69ff2b4a-0e22-4cb4-9f77-71753997ff1e" containerName="nova-metadata-log" Nov 22 11:00:07 crc kubenswrapper[4926]: I1122 11:00:07.708854 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Nov 22 11:00:07 crc kubenswrapper[4926]: I1122 11:00:07.712397 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-metadata-internal-svc" Nov 22 11:00:07 crc kubenswrapper[4926]: I1122 11:00:07.712591 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Nov 22 11:00:07 crc kubenswrapper[4926]: I1122 11:00:07.737498 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 22 11:00:07 crc kubenswrapper[4926]: I1122 11:00:07.831000 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/468e2351-8b2d-4e90-bf03-218570d63fd9-logs\") pod \"nova-metadata-0\" (UID: \"468e2351-8b2d-4e90-bf03-218570d63fd9\") " pod="openstack/nova-metadata-0" Nov 22 11:00:07 crc kubenswrapper[4926]: I1122 11:00:07.831270 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/468e2351-8b2d-4e90-bf03-218570d63fd9-config-data\") pod \"nova-metadata-0\" (UID: \"468e2351-8b2d-4e90-bf03-218570d63fd9\") " pod="openstack/nova-metadata-0" Nov 22 11:00:07 crc kubenswrapper[4926]: I1122 11:00:07.831303 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fp7cz\" (UniqueName: \"kubernetes.io/projected/468e2351-8b2d-4e90-bf03-218570d63fd9-kube-api-access-fp7cz\") pod \"nova-metadata-0\" (UID: \"468e2351-8b2d-4e90-bf03-218570d63fd9\") " pod="openstack/nova-metadata-0" Nov 22 11:00:07 crc kubenswrapper[4926]: I1122 11:00:07.831325 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/468e2351-8b2d-4e90-bf03-218570d63fd9-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"468e2351-8b2d-4e90-bf03-218570d63fd9\") " pod="openstack/nova-metadata-0" Nov 22 11:00:07 crc kubenswrapper[4926]: I1122 11:00:07.831468 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/468e2351-8b2d-4e90-bf03-218570d63fd9-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"468e2351-8b2d-4e90-bf03-218570d63fd9\") " pod="openstack/nova-metadata-0" Nov 22 11:00:07 crc kubenswrapper[4926]: I1122 11:00:07.933145 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/468e2351-8b2d-4e90-bf03-218570d63fd9-logs\") pod \"nova-metadata-0\" (UID: \"468e2351-8b2d-4e90-bf03-218570d63fd9\") " pod="openstack/nova-metadata-0" Nov 22 11:00:07 crc kubenswrapper[4926]: I1122 11:00:07.933230 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/468e2351-8b2d-4e90-bf03-218570d63fd9-config-data\") pod \"nova-metadata-0\" (UID: \"468e2351-8b2d-4e90-bf03-218570d63fd9\") " pod="openstack/nova-metadata-0" Nov 22 11:00:07 crc kubenswrapper[4926]: I1122 11:00:07.933262 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fp7cz\" (UniqueName: \"kubernetes.io/projected/468e2351-8b2d-4e90-bf03-218570d63fd9-kube-api-access-fp7cz\") pod \"nova-metadata-0\" (UID: \"468e2351-8b2d-4e90-bf03-218570d63fd9\") " pod="openstack/nova-metadata-0" Nov 22 
11:00:07 crc kubenswrapper[4926]: I1122 11:00:07.933281 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/468e2351-8b2d-4e90-bf03-218570d63fd9-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"468e2351-8b2d-4e90-bf03-218570d63fd9\") " pod="openstack/nova-metadata-0" Nov 22 11:00:07 crc kubenswrapper[4926]: I1122 11:00:07.933373 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/468e2351-8b2d-4e90-bf03-218570d63fd9-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"468e2351-8b2d-4e90-bf03-218570d63fd9\") " pod="openstack/nova-metadata-0" Nov 22 11:00:07 crc kubenswrapper[4926]: I1122 11:00:07.933811 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/468e2351-8b2d-4e90-bf03-218570d63fd9-logs\") pod \"nova-metadata-0\" (UID: \"468e2351-8b2d-4e90-bf03-218570d63fd9\") " pod="openstack/nova-metadata-0" Nov 22 11:00:07 crc kubenswrapper[4926]: I1122 11:00:07.937828 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/468e2351-8b2d-4e90-bf03-218570d63fd9-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"468e2351-8b2d-4e90-bf03-218570d63fd9\") " pod="openstack/nova-metadata-0" Nov 22 11:00:07 crc kubenswrapper[4926]: I1122 11:00:07.938832 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/468e2351-8b2d-4e90-bf03-218570d63fd9-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"468e2351-8b2d-4e90-bf03-218570d63fd9\") " pod="openstack/nova-metadata-0" Nov 22 11:00:07 crc kubenswrapper[4926]: I1122 11:00:07.939539 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/468e2351-8b2d-4e90-bf03-218570d63fd9-config-data\") pod \"nova-metadata-0\" (UID: \"468e2351-8b2d-4e90-bf03-218570d63fd9\") " pod="openstack/nova-metadata-0" Nov 22 11:00:07 crc kubenswrapper[4926]: I1122 11:00:07.963354 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fp7cz\" (UniqueName: \"kubernetes.io/projected/468e2351-8b2d-4e90-bf03-218570d63fd9-kube-api-access-fp7cz\") pod \"nova-metadata-0\" (UID: \"468e2351-8b2d-4e90-bf03-218570d63fd9\") " pod="openstack/nova-metadata-0" Nov 22 11:00:08 crc kubenswrapper[4926]: I1122 11:00:08.069967 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 22 11:00:08 crc kubenswrapper[4926]: I1122 11:00:08.354666 4926 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Nov 22 11:00:08 crc kubenswrapper[4926]: I1122 11:00:08.441837 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f442b1e7-127c-4808-b6c0-57a419fc276b-combined-ca-bundle\") pod \"f442b1e7-127c-4808-b6c0-57a419fc276b\" (UID: \"f442b1e7-127c-4808-b6c0-57a419fc276b\") " Nov 22 11:00:08 crc kubenswrapper[4926]: I1122 11:00:08.442004 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wnxvq\" (UniqueName: \"kubernetes.io/projected/f442b1e7-127c-4808-b6c0-57a419fc276b-kube-api-access-wnxvq\") pod \"f442b1e7-127c-4808-b6c0-57a419fc276b\" (UID: \"f442b1e7-127c-4808-b6c0-57a419fc276b\") " Nov 22 11:00:08 crc kubenswrapper[4926]: I1122 11:00:08.442085 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f442b1e7-127c-4808-b6c0-57a419fc276b-config-data\") pod \"f442b1e7-127c-4808-b6c0-57a419fc276b\" (UID: \"f442b1e7-127c-4808-b6c0-57a419fc276b\") " Nov 22 11:00:08 crc kubenswrapper[4926]: I1122 11:00:08.447052 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f442b1e7-127c-4808-b6c0-57a419fc276b-kube-api-access-wnxvq" (OuterVolumeSpecName: "kube-api-access-wnxvq") pod "f442b1e7-127c-4808-b6c0-57a419fc276b" (UID: "f442b1e7-127c-4808-b6c0-57a419fc276b"). InnerVolumeSpecName "kube-api-access-wnxvq". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 11:00:08 crc kubenswrapper[4926]: I1122 11:00:08.478265 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f442b1e7-127c-4808-b6c0-57a419fc276b-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "f442b1e7-127c-4808-b6c0-57a419fc276b" (UID: "f442b1e7-127c-4808-b6c0-57a419fc276b"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 11:00:08 crc kubenswrapper[4926]: I1122 11:00:08.492038 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f442b1e7-127c-4808-b6c0-57a419fc276b-config-data" (OuterVolumeSpecName: "config-data") pod "f442b1e7-127c-4808-b6c0-57a419fc276b" (UID: "f442b1e7-127c-4808-b6c0-57a419fc276b"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 11:00:08 crc kubenswrapper[4926]: W1122 11:00:08.534313 4926 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod468e2351_8b2d_4e90_bf03_218570d63fd9.slice/crio-e73dde2cd5f0dab0e4166cf14969fb4809edeb2ac9ab419dcb84c1f35d208643 WatchSource:0}: Error finding container e73dde2cd5f0dab0e4166cf14969fb4809edeb2ac9ab419dcb84c1f35d208643: Status 404 returned error can't find the container with id e73dde2cd5f0dab0e4166cf14969fb4809edeb2ac9ab419dcb84c1f35d208643 Nov 22 11:00:08 crc kubenswrapper[4926]: I1122 11:00:08.535908 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 22 11:00:08 crc kubenswrapper[4926]: I1122 11:00:08.543834 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wnxvq\" (UniqueName: \"kubernetes.io/projected/f442b1e7-127c-4808-b6c0-57a419fc276b-kube-api-access-wnxvq\") on node \"crc\" DevicePath \"\"" Nov 22 11:00:08 crc kubenswrapper[4926]: I1122 11:00:08.543866 4926 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f442b1e7-127c-4808-b6c0-57a419fc276b-config-data\") on node \"crc\" DevicePath \"\"" Nov 22 11:00:08 crc kubenswrapper[4926]: I1122 11:00:08.543875 4926 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f442b1e7-127c-4808-b6c0-57a419fc276b-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 22 11:00:08 crc kubenswrapper[4926]: I1122 11:00:08.577354 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 22 11:00:08 crc kubenswrapper[4926]: I1122 11:00:08.598437 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="69ff2b4a-0e22-4cb4-9f77-71753997ff1e" path="/var/lib/kubelet/pods/69ff2b4a-0e22-4cb4-9f77-71753997ff1e/volumes" Nov 22 11:00:08 crc kubenswrapper[4926]: I1122 11:00:08.645705 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4268c17c-3adb-4912-ae27-bc528e97a6c7-logs\") pod \"4268c17c-3adb-4912-ae27-bc528e97a6c7\" (UID: \"4268c17c-3adb-4912-ae27-bc528e97a6c7\") " Nov 22 11:00:08 crc kubenswrapper[4926]: I1122 11:00:08.645947 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8k724\" (UniqueName: \"kubernetes.io/projected/4268c17c-3adb-4912-ae27-bc528e97a6c7-kube-api-access-8k724\") pod \"4268c17c-3adb-4912-ae27-bc528e97a6c7\" (UID: \"4268c17c-3adb-4912-ae27-bc528e97a6c7\") " Nov 22 11:00:08 crc kubenswrapper[4926]: I1122 11:00:08.646029 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/4268c17c-3adb-4912-ae27-bc528e97a6c7-public-tls-certs\") pod \"4268c17c-3adb-4912-ae27-bc528e97a6c7\" (UID: \"4268c17c-3adb-4912-ae27-bc528e97a6c7\") " Nov 22 11:00:08 crc kubenswrapper[4926]: I1122 11:00:08.646085 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4268c17c-3adb-4912-ae27-bc528e97a6c7-combined-ca-bundle\") pod \"4268c17c-3adb-4912-ae27-bc528e97a6c7\" (UID: \"4268c17c-3adb-4912-ae27-bc528e97a6c7\") " Nov 22 11:00:08 crc kubenswrapper[4926]: I1122 11:00:08.646111 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume 
\"config-data\" (UniqueName: \"kubernetes.io/secret/4268c17c-3adb-4912-ae27-bc528e97a6c7-config-data\") pod \"4268c17c-3adb-4912-ae27-bc528e97a6c7\" (UID: \"4268c17c-3adb-4912-ae27-bc528e97a6c7\") " Nov 22 11:00:08 crc kubenswrapper[4926]: I1122 11:00:08.646162 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/4268c17c-3adb-4912-ae27-bc528e97a6c7-internal-tls-certs\") pod \"4268c17c-3adb-4912-ae27-bc528e97a6c7\" (UID: \"4268c17c-3adb-4912-ae27-bc528e97a6c7\") " Nov 22 11:00:08 crc kubenswrapper[4926]: I1122 11:00:08.648063 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4268c17c-3adb-4912-ae27-bc528e97a6c7-logs" (OuterVolumeSpecName: "logs") pod "4268c17c-3adb-4912-ae27-bc528e97a6c7" (UID: "4268c17c-3adb-4912-ae27-bc528e97a6c7"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 11:00:08 crc kubenswrapper[4926]: I1122 11:00:08.652482 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4268c17c-3adb-4912-ae27-bc528e97a6c7-kube-api-access-8k724" (OuterVolumeSpecName: "kube-api-access-8k724") pod "4268c17c-3adb-4912-ae27-bc528e97a6c7" (UID: "4268c17c-3adb-4912-ae27-bc528e97a6c7"). InnerVolumeSpecName "kube-api-access-8k724". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 11:00:08 crc kubenswrapper[4926]: I1122 11:00:08.652553 4926 generic.go:334] "Generic (PLEG): container finished" podID="4268c17c-3adb-4912-ae27-bc528e97a6c7" containerID="b47f3e7fc264c7513377690ae5f6f6e677ec4c53e1b226ec013d61f3e3501bd5" exitCode=0 Nov 22 11:00:08 crc kubenswrapper[4926]: I1122 11:00:08.652641 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 22 11:00:08 crc kubenswrapper[4926]: I1122 11:00:08.652680 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"4268c17c-3adb-4912-ae27-bc528e97a6c7","Type":"ContainerDied","Data":"b47f3e7fc264c7513377690ae5f6f6e677ec4c53e1b226ec013d61f3e3501bd5"} Nov 22 11:00:08 crc kubenswrapper[4926]: I1122 11:00:08.652715 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"4268c17c-3adb-4912-ae27-bc528e97a6c7","Type":"ContainerDied","Data":"5175506249f2f96ac634f00c76efc55bfe4e39c4e7b3f310339cd388e5d96621"} Nov 22 11:00:08 crc kubenswrapper[4926]: I1122 11:00:08.652736 4926 scope.go:117] "RemoveContainer" containerID="b47f3e7fc264c7513377690ae5f6f6e677ec4c53e1b226ec013d61f3e3501bd5" Nov 22 11:00:08 crc kubenswrapper[4926]: I1122 11:00:08.661114 4926 generic.go:334] "Generic (PLEG): container finished" podID="f442b1e7-127c-4808-b6c0-57a419fc276b" containerID="7b69868baac4f2ea119bf1cb8ce58ab3d05ed831b82829d98dc2ebe6c044c393" exitCode=0 Nov 22 11:00:08 crc kubenswrapper[4926]: I1122 11:00:08.661164 4926 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Nov 22 11:00:08 crc kubenswrapper[4926]: I1122 11:00:08.661294 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"f442b1e7-127c-4808-b6c0-57a419fc276b","Type":"ContainerDied","Data":"7b69868baac4f2ea119bf1cb8ce58ab3d05ed831b82829d98dc2ebe6c044c393"} Nov 22 11:00:08 crc kubenswrapper[4926]: I1122 11:00:08.661369 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"f442b1e7-127c-4808-b6c0-57a419fc276b","Type":"ContainerDied","Data":"8dd601b41febc5af968d0b0e351a9cc1fa354d68e0dd6d97b8372109994333cf"} Nov 22 11:00:08 crc kubenswrapper[4926]: I1122 11:00:08.663725 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"468e2351-8b2d-4e90-bf03-218570d63fd9","Type":"ContainerStarted","Data":"e73dde2cd5f0dab0e4166cf14969fb4809edeb2ac9ab419dcb84c1f35d208643"} Nov 22 11:00:08 crc kubenswrapper[4926]: I1122 11:00:08.681651 4926 scope.go:117] "RemoveContainer" containerID="7d86310a46f973a0c9414df72f1a86e40a6a9a1859c3f5e226570ee90972274b" Nov 22 11:00:08 crc kubenswrapper[4926]: I1122 11:00:08.692576 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4268c17c-3adb-4912-ae27-bc528e97a6c7-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "4268c17c-3adb-4912-ae27-bc528e97a6c7" (UID: "4268c17c-3adb-4912-ae27-bc528e97a6c7"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 11:00:08 crc kubenswrapper[4926]: I1122 11:00:08.697310 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Nov 22 11:00:08 crc kubenswrapper[4926]: I1122 11:00:08.714158 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-scheduler-0"] Nov 22 11:00:08 crc kubenswrapper[4926]: I1122 11:00:08.719492 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4268c17c-3adb-4912-ae27-bc528e97a6c7-config-data" (OuterVolumeSpecName: "config-data") pod "4268c17c-3adb-4912-ae27-bc528e97a6c7" (UID: "4268c17c-3adb-4912-ae27-bc528e97a6c7"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 11:00:08 crc kubenswrapper[4926]: I1122 11:00:08.729478 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"] Nov 22 11:00:08 crc kubenswrapper[4926]: I1122 11:00:08.729608 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4268c17c-3adb-4912-ae27-bc528e97a6c7-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "4268c17c-3adb-4912-ae27-bc528e97a6c7" (UID: "4268c17c-3adb-4912-ae27-bc528e97a6c7"). InnerVolumeSpecName "internal-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 11:00:08 crc kubenswrapper[4926]: E1122 11:00:08.729910 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f442b1e7-127c-4808-b6c0-57a419fc276b" containerName="nova-scheduler-scheduler" Nov 22 11:00:08 crc kubenswrapper[4926]: I1122 11:00:08.729928 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="f442b1e7-127c-4808-b6c0-57a419fc276b" containerName="nova-scheduler-scheduler" Nov 22 11:00:08 crc kubenswrapper[4926]: E1122 11:00:08.729957 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4268c17c-3adb-4912-ae27-bc528e97a6c7" containerName="nova-api-api" Nov 22 11:00:08 crc kubenswrapper[4926]: I1122 11:00:08.729965 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="4268c17c-3adb-4912-ae27-bc528e97a6c7" containerName="nova-api-api" Nov 22 11:00:08 crc kubenswrapper[4926]: E1122 11:00:08.729990 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4268c17c-3adb-4912-ae27-bc528e97a6c7" containerName="nova-api-log" Nov 22 11:00:08 crc kubenswrapper[4926]: I1122 11:00:08.729996 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="4268c17c-3adb-4912-ae27-bc528e97a6c7" containerName="nova-api-log" Nov 22 11:00:08 crc kubenswrapper[4926]: I1122 11:00:08.730190 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="4268c17c-3adb-4912-ae27-bc528e97a6c7" containerName="nova-api-log" Nov 22 11:00:08 crc kubenswrapper[4926]: I1122 11:00:08.730204 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="f442b1e7-127c-4808-b6c0-57a419fc276b" containerName="nova-scheduler-scheduler" Nov 22 11:00:08 crc kubenswrapper[4926]: I1122 11:00:08.730221 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="4268c17c-3adb-4912-ae27-bc528e97a6c7" containerName="nova-api-api" Nov 22 11:00:08 crc kubenswrapper[4926]: I1122 11:00:08.730836 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Nov 22 11:00:08 crc kubenswrapper[4926]: I1122 11:00:08.739233 4926 scope.go:117] "RemoveContainer" containerID="b47f3e7fc264c7513377690ae5f6f6e677ec4c53e1b226ec013d61f3e3501bd5" Nov 22 11:00:08 crc kubenswrapper[4926]: I1122 11:00:08.739639 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data" Nov 22 11:00:08 crc kubenswrapper[4926]: E1122 11:00:08.740896 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b47f3e7fc264c7513377690ae5f6f6e677ec4c53e1b226ec013d61f3e3501bd5\": container with ID starting with b47f3e7fc264c7513377690ae5f6f6e677ec4c53e1b226ec013d61f3e3501bd5 not found: ID does not exist" containerID="b47f3e7fc264c7513377690ae5f6f6e677ec4c53e1b226ec013d61f3e3501bd5" Nov 22 11:00:08 crc kubenswrapper[4926]: I1122 11:00:08.740932 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b47f3e7fc264c7513377690ae5f6f6e677ec4c53e1b226ec013d61f3e3501bd5"} err="failed to get container status \"b47f3e7fc264c7513377690ae5f6f6e677ec4c53e1b226ec013d61f3e3501bd5\": rpc error: code = NotFound desc = could not find container \"b47f3e7fc264c7513377690ae5f6f6e677ec4c53e1b226ec013d61f3e3501bd5\": container with ID starting with b47f3e7fc264c7513377690ae5f6f6e677ec4c53e1b226ec013d61f3e3501bd5 not found: ID does not exist" Nov 22 11:00:08 crc kubenswrapper[4926]: I1122 11:00:08.740960 4926 scope.go:117] "RemoveContainer" containerID="7d86310a46f973a0c9414df72f1a86e40a6a9a1859c3f5e226570ee90972274b" Nov 22 11:00:08 crc kubenswrapper[4926]: E1122 11:00:08.741471 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7d86310a46f973a0c9414df72f1a86e40a6a9a1859c3f5e226570ee90972274b\": container with ID starting with 7d86310a46f973a0c9414df72f1a86e40a6a9a1859c3f5e226570ee90972274b not found: ID does not exist" containerID="7d86310a46f973a0c9414df72f1a86e40a6a9a1859c3f5e226570ee90972274b" Nov 22 11:00:08 crc kubenswrapper[4926]: I1122 11:00:08.741496 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7d86310a46f973a0c9414df72f1a86e40a6a9a1859c3f5e226570ee90972274b"} err="failed to get container status \"7d86310a46f973a0c9414df72f1a86e40a6a9a1859c3f5e226570ee90972274b\": rpc error: code = NotFound desc = could not find container \"7d86310a46f973a0c9414df72f1a86e40a6a9a1859c3f5e226570ee90972274b\": container with ID starting with 7d86310a46f973a0c9414df72f1a86e40a6a9a1859c3f5e226570ee90972274b not found: ID does not exist" Nov 22 11:00:08 crc kubenswrapper[4926]: I1122 11:00:08.741516 4926 scope.go:117] "RemoveContainer" containerID="7b69868baac4f2ea119bf1cb8ce58ab3d05ed831b82829d98dc2ebe6c044c393" Nov 22 11:00:08 crc kubenswrapper[4926]: I1122 11:00:08.749351 4926 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4268c17c-3adb-4912-ae27-bc528e97a6c7-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 22 11:00:08 crc kubenswrapper[4926]: I1122 11:00:08.749376 4926 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4268c17c-3adb-4912-ae27-bc528e97a6c7-config-data\") on node \"crc\" DevicePath \"\"" Nov 22 11:00:08 crc kubenswrapper[4926]: I1122 11:00:08.749385 4926 reconciler_common.go:293] "Volume detached for volume 
\"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/4268c17c-3adb-4912-ae27-bc528e97a6c7-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 22 11:00:08 crc kubenswrapper[4926]: I1122 11:00:08.749397 4926 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4268c17c-3adb-4912-ae27-bc528e97a6c7-logs\") on node \"crc\" DevicePath \"\"" Nov 22 11:00:08 crc kubenswrapper[4926]: I1122 11:00:08.749405 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8k724\" (UniqueName: \"kubernetes.io/projected/4268c17c-3adb-4912-ae27-bc528e97a6c7-kube-api-access-8k724\") on node \"crc\" DevicePath \"\"" Nov 22 11:00:08 crc kubenswrapper[4926]: I1122 11:00:08.751615 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Nov 22 11:00:08 crc kubenswrapper[4926]: I1122 11:00:08.753121 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4268c17c-3adb-4912-ae27-bc528e97a6c7-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "4268c17c-3adb-4912-ae27-bc528e97a6c7" (UID: "4268c17c-3adb-4912-ae27-bc528e97a6c7"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 11:00:08 crc kubenswrapper[4926]: I1122 11:00:08.784864 4926 scope.go:117] "RemoveContainer" containerID="7b69868baac4f2ea119bf1cb8ce58ab3d05ed831b82829d98dc2ebe6c044c393" Nov 22 11:00:08 crc kubenswrapper[4926]: E1122 11:00:08.785693 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7b69868baac4f2ea119bf1cb8ce58ab3d05ed831b82829d98dc2ebe6c044c393\": container with ID starting with 7b69868baac4f2ea119bf1cb8ce58ab3d05ed831b82829d98dc2ebe6c044c393 not found: ID does not exist" containerID="7b69868baac4f2ea119bf1cb8ce58ab3d05ed831b82829d98dc2ebe6c044c393" Nov 22 11:00:08 crc kubenswrapper[4926]: I1122 11:00:08.785814 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7b69868baac4f2ea119bf1cb8ce58ab3d05ed831b82829d98dc2ebe6c044c393"} err="failed to get container status \"7b69868baac4f2ea119bf1cb8ce58ab3d05ed831b82829d98dc2ebe6c044c393\": rpc error: code = NotFound desc = could not find container \"7b69868baac4f2ea119bf1cb8ce58ab3d05ed831b82829d98dc2ebe6c044c393\": container with ID starting with 7b69868baac4f2ea119bf1cb8ce58ab3d05ed831b82829d98dc2ebe6c044c393 not found: ID does not exist" Nov 22 11:00:08 crc kubenswrapper[4926]: I1122 11:00:08.851640 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/360b8f0a-6a7b-4772-839f-cab107433443-config-data\") pod \"nova-scheduler-0\" (UID: \"360b8f0a-6a7b-4772-839f-cab107433443\") " pod="openstack/nova-scheduler-0" Nov 22 11:00:08 crc kubenswrapper[4926]: I1122 11:00:08.852053 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/360b8f0a-6a7b-4772-839f-cab107433443-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"360b8f0a-6a7b-4772-839f-cab107433443\") " pod="openstack/nova-scheduler-0" Nov 22 11:00:08 crc kubenswrapper[4926]: I1122 11:00:08.852283 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fl8t2\" (UniqueName: 
\"kubernetes.io/projected/360b8f0a-6a7b-4772-839f-cab107433443-kube-api-access-fl8t2\") pod \"nova-scheduler-0\" (UID: \"360b8f0a-6a7b-4772-839f-cab107433443\") " pod="openstack/nova-scheduler-0" Nov 22 11:00:08 crc kubenswrapper[4926]: I1122 11:00:08.852394 4926 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/4268c17c-3adb-4912-ae27-bc528e97a6c7-public-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 22 11:00:08 crc kubenswrapper[4926]: I1122 11:00:08.953729 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/360b8f0a-6a7b-4772-839f-cab107433443-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"360b8f0a-6a7b-4772-839f-cab107433443\") " pod="openstack/nova-scheduler-0" Nov 22 11:00:08 crc kubenswrapper[4926]: I1122 11:00:08.953928 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fl8t2\" (UniqueName: \"kubernetes.io/projected/360b8f0a-6a7b-4772-839f-cab107433443-kube-api-access-fl8t2\") pod \"nova-scheduler-0\" (UID: \"360b8f0a-6a7b-4772-839f-cab107433443\") " pod="openstack/nova-scheduler-0" Nov 22 11:00:08 crc kubenswrapper[4926]: I1122 11:00:08.953979 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/360b8f0a-6a7b-4772-839f-cab107433443-config-data\") pod \"nova-scheduler-0\" (UID: \"360b8f0a-6a7b-4772-839f-cab107433443\") " pod="openstack/nova-scheduler-0" Nov 22 11:00:08 crc kubenswrapper[4926]: I1122 11:00:08.958024 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/360b8f0a-6a7b-4772-839f-cab107433443-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"360b8f0a-6a7b-4772-839f-cab107433443\") " pod="openstack/nova-scheduler-0" Nov 22 11:00:08 crc kubenswrapper[4926]: I1122 11:00:08.963847 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/360b8f0a-6a7b-4772-839f-cab107433443-config-data\") pod \"nova-scheduler-0\" (UID: \"360b8f0a-6a7b-4772-839f-cab107433443\") " pod="openstack/nova-scheduler-0" Nov 22 11:00:08 crc kubenswrapper[4926]: I1122 11:00:08.984770 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fl8t2\" (UniqueName: \"kubernetes.io/projected/360b8f0a-6a7b-4772-839f-cab107433443-kube-api-access-fl8t2\") pod \"nova-scheduler-0\" (UID: \"360b8f0a-6a7b-4772-839f-cab107433443\") " pod="openstack/nova-scheduler-0" Nov 22 11:00:08 crc kubenswrapper[4926]: I1122 11:00:08.993013 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Nov 22 11:00:09 crc kubenswrapper[4926]: I1122 11:00:09.011264 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"] Nov 22 11:00:09 crc kubenswrapper[4926]: I1122 11:00:09.033320 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Nov 22 11:00:09 crc kubenswrapper[4926]: I1122 11:00:09.036596 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Nov 22 11:00:09 crc kubenswrapper[4926]: I1122 11:00:09.039862 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 22 11:00:09 crc kubenswrapper[4926]: I1122 11:00:09.047937 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Nov 22 11:00:09 crc kubenswrapper[4926]: I1122 11:00:09.048709 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-internal-svc" Nov 22 11:00:09 crc kubenswrapper[4926]: I1122 11:00:09.048866 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-public-svc" Nov 22 11:00:09 crc kubenswrapper[4926]: I1122 11:00:09.067959 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 22 11:00:09 crc kubenswrapper[4926]: I1122 11:00:09.156828 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rl8r6\" (UniqueName: \"kubernetes.io/projected/95e7c80b-edf7-42be-892c-11557c816271-kube-api-access-rl8r6\") pod \"nova-api-0\" (UID: \"95e7c80b-edf7-42be-892c-11557c816271\") " pod="openstack/nova-api-0" Nov 22 11:00:09 crc kubenswrapper[4926]: I1122 11:00:09.157235 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/95e7c80b-edf7-42be-892c-11557c816271-config-data\") pod \"nova-api-0\" (UID: \"95e7c80b-edf7-42be-892c-11557c816271\") " pod="openstack/nova-api-0" Nov 22 11:00:09 crc kubenswrapper[4926]: I1122 11:00:09.157268 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/95e7c80b-edf7-42be-892c-11557c816271-internal-tls-certs\") pod \"nova-api-0\" (UID: \"95e7c80b-edf7-42be-892c-11557c816271\") " pod="openstack/nova-api-0" Nov 22 11:00:09 crc kubenswrapper[4926]: I1122 11:00:09.157382 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/95e7c80b-edf7-42be-892c-11557c816271-public-tls-certs\") pod \"nova-api-0\" (UID: \"95e7c80b-edf7-42be-892c-11557c816271\") " pod="openstack/nova-api-0" Nov 22 11:00:09 crc kubenswrapper[4926]: I1122 11:00:09.157637 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/95e7c80b-edf7-42be-892c-11557c816271-logs\") pod \"nova-api-0\" (UID: \"95e7c80b-edf7-42be-892c-11557c816271\") " pod="openstack/nova-api-0" Nov 22 11:00:09 crc kubenswrapper[4926]: I1122 11:00:09.157757 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/95e7c80b-edf7-42be-892c-11557c816271-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"95e7c80b-edf7-42be-892c-11557c816271\") " pod="openstack/nova-api-0" Nov 22 11:00:09 crc kubenswrapper[4926]: I1122 11:00:09.263314 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/95e7c80b-edf7-42be-892c-11557c816271-public-tls-certs\") pod \"nova-api-0\" (UID: \"95e7c80b-edf7-42be-892c-11557c816271\") " pod="openstack/nova-api-0" Nov 22 11:00:09 crc kubenswrapper[4926]: I1122 11:00:09.263491 4926 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/95e7c80b-edf7-42be-892c-11557c816271-logs\") pod \"nova-api-0\" (UID: \"95e7c80b-edf7-42be-892c-11557c816271\") " pod="openstack/nova-api-0" Nov 22 11:00:09 crc kubenswrapper[4926]: I1122 11:00:09.263597 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/95e7c80b-edf7-42be-892c-11557c816271-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"95e7c80b-edf7-42be-892c-11557c816271\") " pod="openstack/nova-api-0" Nov 22 11:00:09 crc kubenswrapper[4926]: I1122 11:00:09.263692 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rl8r6\" (UniqueName: \"kubernetes.io/projected/95e7c80b-edf7-42be-892c-11557c816271-kube-api-access-rl8r6\") pod \"nova-api-0\" (UID: \"95e7c80b-edf7-42be-892c-11557c816271\") " pod="openstack/nova-api-0" Nov 22 11:00:09 crc kubenswrapper[4926]: I1122 11:00:09.263735 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/95e7c80b-edf7-42be-892c-11557c816271-config-data\") pod \"nova-api-0\" (UID: \"95e7c80b-edf7-42be-892c-11557c816271\") " pod="openstack/nova-api-0" Nov 22 11:00:09 crc kubenswrapper[4926]: I1122 11:00:09.263795 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/95e7c80b-edf7-42be-892c-11557c816271-internal-tls-certs\") pod \"nova-api-0\" (UID: \"95e7c80b-edf7-42be-892c-11557c816271\") " pod="openstack/nova-api-0" Nov 22 11:00:09 crc kubenswrapper[4926]: I1122 11:00:09.264562 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/95e7c80b-edf7-42be-892c-11557c816271-logs\") pod \"nova-api-0\" (UID: \"95e7c80b-edf7-42be-892c-11557c816271\") " pod="openstack/nova-api-0" Nov 22 11:00:09 crc kubenswrapper[4926]: I1122 11:00:09.268785 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/95e7c80b-edf7-42be-892c-11557c816271-public-tls-certs\") pod \"nova-api-0\" (UID: \"95e7c80b-edf7-42be-892c-11557c816271\") " pod="openstack/nova-api-0" Nov 22 11:00:09 crc kubenswrapper[4926]: I1122 11:00:09.268855 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/95e7c80b-edf7-42be-892c-11557c816271-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"95e7c80b-edf7-42be-892c-11557c816271\") " pod="openstack/nova-api-0" Nov 22 11:00:09 crc kubenswrapper[4926]: I1122 11:00:09.269235 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/95e7c80b-edf7-42be-892c-11557c816271-config-data\") pod \"nova-api-0\" (UID: \"95e7c80b-edf7-42be-892c-11557c816271\") " pod="openstack/nova-api-0" Nov 22 11:00:09 crc kubenswrapper[4926]: I1122 11:00:09.269788 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/95e7c80b-edf7-42be-892c-11557c816271-internal-tls-certs\") pod \"nova-api-0\" (UID: \"95e7c80b-edf7-42be-892c-11557c816271\") " pod="openstack/nova-api-0" Nov 22 11:00:09 crc kubenswrapper[4926]: I1122 11:00:09.286981 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rl8r6\" (UniqueName: 
\"kubernetes.io/projected/95e7c80b-edf7-42be-892c-11557c816271-kube-api-access-rl8r6\") pod \"nova-api-0\" (UID: \"95e7c80b-edf7-42be-892c-11557c816271\") " pod="openstack/nova-api-0" Nov 22 11:00:09 crc kubenswrapper[4926]: I1122 11:00:09.471115 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 22 11:00:09 crc kubenswrapper[4926]: I1122 11:00:09.534426 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Nov 22 11:00:09 crc kubenswrapper[4926]: I1122 11:00:09.674769 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"360b8f0a-6a7b-4772-839f-cab107433443","Type":"ContainerStarted","Data":"a4b5e3bb5d7d6c4f079a8b61471523d579ef5116fa9d962f4e33932d6ec090ac"} Nov 22 11:00:09 crc kubenswrapper[4926]: I1122 11:00:09.683741 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"468e2351-8b2d-4e90-bf03-218570d63fd9","Type":"ContainerStarted","Data":"f7103ee715dd014b85b7a1e11a7fc405238c2f8c4e526ffa5995b05725b653b3"} Nov 22 11:00:09 crc kubenswrapper[4926]: I1122 11:00:09.683788 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"468e2351-8b2d-4e90-bf03-218570d63fd9","Type":"ContainerStarted","Data":"87eeac073353f1a7c4754f6543633a330392e19048a71ae2c08c197df45f0f26"} Nov 22 11:00:10 crc kubenswrapper[4926]: I1122 11:00:10.008021 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=3.007990777 podStartE2EDuration="3.007990777s" podCreationTimestamp="2025-11-22 11:00:07 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 11:00:09.715296986 +0000 UTC m=+1230.016902293" watchObservedRunningTime="2025-11-22 11:00:10.007990777 +0000 UTC m=+1230.309596064" Nov 22 11:00:10 crc kubenswrapper[4926]: I1122 11:00:10.012297 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 22 11:00:10 crc kubenswrapper[4926]: W1122 11:00:10.013856 4926 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod95e7c80b_edf7_42be_892c_11557c816271.slice/crio-adb26ec1778bdfbf85efd454aba15e692a8d4dc38f92ab777ddc19c86987b7dc WatchSource:0}: Error finding container adb26ec1778bdfbf85efd454aba15e692a8d4dc38f92ab777ddc19c86987b7dc: Status 404 returned error can't find the container with id adb26ec1778bdfbf85efd454aba15e692a8d4dc38f92ab777ddc19c86987b7dc Nov 22 11:00:10 crc kubenswrapper[4926]: I1122 11:00:10.599723 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4268c17c-3adb-4912-ae27-bc528e97a6c7" path="/var/lib/kubelet/pods/4268c17c-3adb-4912-ae27-bc528e97a6c7/volumes" Nov 22 11:00:10 crc kubenswrapper[4926]: I1122 11:00:10.600554 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f442b1e7-127c-4808-b6c0-57a419fc276b" path="/var/lib/kubelet/pods/f442b1e7-127c-4808-b6c0-57a419fc276b/volumes" Nov 22 11:00:10 crc kubenswrapper[4926]: I1122 11:00:10.695965 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"360b8f0a-6a7b-4772-839f-cab107433443","Type":"ContainerStarted","Data":"f8a89a28d9cd2e46b79a36b106d5e27f9cbc0ad7aefda7e051e149ae28506f7d"} Nov 22 11:00:10 crc kubenswrapper[4926]: I1122 11:00:10.699344 4926 kubelet.go:2453] "SyncLoop 
(PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"95e7c80b-edf7-42be-892c-11557c816271","Type":"ContainerStarted","Data":"432b1839ad5a563a5696839dd59686cb0e9939afa05ad1490e82a7550d6047e7"} Nov 22 11:00:10 crc kubenswrapper[4926]: I1122 11:00:10.699377 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"95e7c80b-edf7-42be-892c-11557c816271","Type":"ContainerStarted","Data":"ecdf970cd7765c1a15705c972500113d9a0acc756ce657b234cb60d693c6cb93"} Nov 22 11:00:10 crc kubenswrapper[4926]: I1122 11:00:10.699387 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"95e7c80b-edf7-42be-892c-11557c816271","Type":"ContainerStarted","Data":"adb26ec1778bdfbf85efd454aba15e692a8d4dc38f92ab777ddc19c86987b7dc"} Nov 22 11:00:10 crc kubenswrapper[4926]: I1122 11:00:10.745593 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-scheduler-0" podStartSLOduration=2.745571831 podStartE2EDuration="2.745571831s" podCreationTimestamp="2025-11-22 11:00:08 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 11:00:10.720543344 +0000 UTC m=+1231.022148631" watchObservedRunningTime="2025-11-22 11:00:10.745571831 +0000 UTC m=+1231.047177118" Nov 22 11:00:10 crc kubenswrapper[4926]: I1122 11:00:10.746047 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=2.746040325 podStartE2EDuration="2.746040325s" podCreationTimestamp="2025-11-22 11:00:08 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 11:00:10.73923828 +0000 UTC m=+1231.040843567" watchObservedRunningTime="2025-11-22 11:00:10.746040325 +0000 UTC m=+1231.047645632" Nov 22 11:00:13 crc kubenswrapper[4926]: I1122 11:00:13.070545 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Nov 22 11:00:13 crc kubenswrapper[4926]: I1122 11:00:13.070949 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Nov 22 11:00:14 crc kubenswrapper[4926]: I1122 11:00:14.068845 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0" Nov 22 11:00:18 crc kubenswrapper[4926]: I1122 11:00:18.070876 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Nov 22 11:00:18 crc kubenswrapper[4926]: I1122 11:00:18.071472 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Nov 22 11:00:19 crc kubenswrapper[4926]: I1122 11:00:19.069241 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0" Nov 22 11:00:19 crc kubenswrapper[4926]: I1122 11:00:19.083175 4926 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="468e2351-8b2d-4e90-bf03-218570d63fd9" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"https://10.217.0.206:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Nov 22 11:00:19 crc kubenswrapper[4926]: I1122 11:00:19.083235 4926 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="468e2351-8b2d-4e90-bf03-218570d63fd9" containerName="nova-metadata-log" probeResult="failure" output="Get 
\"https://10.217.0.206:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Nov 22 11:00:19 crc kubenswrapper[4926]: I1122 11:00:19.113369 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-scheduler-0" Nov 22 11:00:19 crc kubenswrapper[4926]: I1122 11:00:19.472373 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Nov 22 11:00:19 crc kubenswrapper[4926]: I1122 11:00:19.472875 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Nov 22 11:00:19 crc kubenswrapper[4926]: I1122 11:00:19.872767 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-scheduler-0" Nov 22 11:00:20 crc kubenswrapper[4926]: I1122 11:00:20.495225 4926 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="95e7c80b-edf7-42be-892c-11557c816271" containerName="nova-api-log" probeResult="failure" output="Get \"https://10.217.0.208:8774/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Nov 22 11:00:20 crc kubenswrapper[4926]: I1122 11:00:20.495215 4926 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="95e7c80b-edf7-42be-892c-11557c816271" containerName="nova-api-api" probeResult="failure" output="Get \"https://10.217.0.208:8774/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Nov 22 11:00:20 crc kubenswrapper[4926]: I1122 11:00:20.792351 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ceilometer-0" Nov 22 11:00:28 crc kubenswrapper[4926]: I1122 11:00:28.080361 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Nov 22 11:00:28 crc kubenswrapper[4926]: I1122 11:00:28.081666 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Nov 22 11:00:28 crc kubenswrapper[4926]: I1122 11:00:28.085917 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0" Nov 22 11:00:28 crc kubenswrapper[4926]: I1122 11:00:28.860686 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0" Nov 22 11:00:29 crc kubenswrapper[4926]: I1122 11:00:29.479065 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Nov 22 11:00:29 crc kubenswrapper[4926]: I1122 11:00:29.479469 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Nov 22 11:00:29 crc kubenswrapper[4926]: I1122 11:00:29.482318 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Nov 22 11:00:29 crc kubenswrapper[4926]: I1122 11:00:29.498276 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Nov 22 11:00:29 crc kubenswrapper[4926]: I1122 11:00:29.867015 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Nov 22 11:00:29 crc kubenswrapper[4926]: I1122 11:00:29.876654 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Nov 22 11:00:37 crc kubenswrapper[4926]: I1122 11:00:37.698413 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-server-0"] Nov 22 11:00:39 crc kubenswrapper[4926]: I1122 11:00:39.018518 4926 
kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 22 11:00:41 crc kubenswrapper[4926]: I1122 11:00:41.866434 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/rabbitmq-server-0" podUID="cb1c4cdf-86c1-4770-b406-87cb1ea92552" containerName="rabbitmq" containerID="cri-o://84686b899f9045caf92860c610c762fee79af52ccc158673bbe452783aa18796" gracePeriod=604796 Nov 22 11:00:43 crc kubenswrapper[4926]: I1122 11:00:43.004960 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/rabbitmq-cell1-server-0" podUID="12c9d69e-19d9-4c70-a080-c3d3fe0c33ce" containerName="rabbitmq" containerID="cri-o://5ed55c3bf3a170998c779817fd8b5d174706e30619b777cab3e7447475c38a06" gracePeriod=604797 Nov 22 11:00:44 crc kubenswrapper[4926]: I1122 11:00:44.594046 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-cell1-server-0" podUID="12c9d69e-19d9-4c70-a080-c3d3fe0c33ce" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.0.100:5671: connect: connection refused" Nov 22 11:00:44 crc kubenswrapper[4926]: I1122 11:00:44.805277 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-server-0" podUID="cb1c4cdf-86c1-4770-b406-87cb1ea92552" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.0.101:5671: connect: connection refused" Nov 22 11:00:48 crc kubenswrapper[4926]: I1122 11:00:48.443408 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0" Nov 22 11:00:48 crc kubenswrapper[4926]: I1122 11:00:48.615282 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/cb1c4cdf-86c1-4770-b406-87cb1ea92552-rabbitmq-plugins\") pod \"cb1c4cdf-86c1-4770-b406-87cb1ea92552\" (UID: \"cb1c4cdf-86c1-4770-b406-87cb1ea92552\") " Nov 22 11:00:48 crc kubenswrapper[4926]: I1122 11:00:48.615362 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/cb1c4cdf-86c1-4770-b406-87cb1ea92552-pod-info\") pod \"cb1c4cdf-86c1-4770-b406-87cb1ea92552\" (UID: \"cb1c4cdf-86c1-4770-b406-87cb1ea92552\") " Nov 22 11:00:48 crc kubenswrapper[4926]: I1122 11:00:48.615431 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"persistence\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"cb1c4cdf-86c1-4770-b406-87cb1ea92552\" (UID: \"cb1c4cdf-86c1-4770-b406-87cb1ea92552\") " Nov 22 11:00:48 crc kubenswrapper[4926]: I1122 11:00:48.615489 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8n82q\" (UniqueName: \"kubernetes.io/projected/cb1c4cdf-86c1-4770-b406-87cb1ea92552-kube-api-access-8n82q\") pod \"cb1c4cdf-86c1-4770-b406-87cb1ea92552\" (UID: \"cb1c4cdf-86c1-4770-b406-87cb1ea92552\") " Nov 22 11:00:48 crc kubenswrapper[4926]: I1122 11:00:48.615523 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/cb1c4cdf-86c1-4770-b406-87cb1ea92552-rabbitmq-tls\") pod \"cb1c4cdf-86c1-4770-b406-87cb1ea92552\" (UID: \"cb1c4cdf-86c1-4770-b406-87cb1ea92552\") " Nov 22 11:00:48 crc kubenswrapper[4926]: I1122 11:00:48.615538 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-confd\" (UniqueName: 
\"kubernetes.io/projected/cb1c4cdf-86c1-4770-b406-87cb1ea92552-rabbitmq-confd\") pod \"cb1c4cdf-86c1-4770-b406-87cb1ea92552\" (UID: \"cb1c4cdf-86c1-4770-b406-87cb1ea92552\") " Nov 22 11:00:48 crc kubenswrapper[4926]: I1122 11:00:48.615608 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/cb1c4cdf-86c1-4770-b406-87cb1ea92552-plugins-conf\") pod \"cb1c4cdf-86c1-4770-b406-87cb1ea92552\" (UID: \"cb1c4cdf-86c1-4770-b406-87cb1ea92552\") " Nov 22 11:00:48 crc kubenswrapper[4926]: I1122 11:00:48.615629 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/cb1c4cdf-86c1-4770-b406-87cb1ea92552-server-conf\") pod \"cb1c4cdf-86c1-4770-b406-87cb1ea92552\" (UID: \"cb1c4cdf-86c1-4770-b406-87cb1ea92552\") " Nov 22 11:00:48 crc kubenswrapper[4926]: I1122 11:00:48.615674 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/cb1c4cdf-86c1-4770-b406-87cb1ea92552-erlang-cookie-secret\") pod \"cb1c4cdf-86c1-4770-b406-87cb1ea92552\" (UID: \"cb1c4cdf-86c1-4770-b406-87cb1ea92552\") " Nov 22 11:00:48 crc kubenswrapper[4926]: I1122 11:00:48.615702 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/cb1c4cdf-86c1-4770-b406-87cb1ea92552-config-data\") pod \"cb1c4cdf-86c1-4770-b406-87cb1ea92552\" (UID: \"cb1c4cdf-86c1-4770-b406-87cb1ea92552\") " Nov 22 11:00:48 crc kubenswrapper[4926]: I1122 11:00:48.615727 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/cb1c4cdf-86c1-4770-b406-87cb1ea92552-rabbitmq-erlang-cookie\") pod \"cb1c4cdf-86c1-4770-b406-87cb1ea92552\" (UID: \"cb1c4cdf-86c1-4770-b406-87cb1ea92552\") " Nov 22 11:00:48 crc kubenswrapper[4926]: I1122 11:00:48.616170 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/cb1c4cdf-86c1-4770-b406-87cb1ea92552-rabbitmq-plugins" (OuterVolumeSpecName: "rabbitmq-plugins") pod "cb1c4cdf-86c1-4770-b406-87cb1ea92552" (UID: "cb1c4cdf-86c1-4770-b406-87cb1ea92552"). InnerVolumeSpecName "rabbitmq-plugins". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 11:00:48 crc kubenswrapper[4926]: I1122 11:00:48.616586 4926 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/cb1c4cdf-86c1-4770-b406-87cb1ea92552-rabbitmq-plugins\") on node \"crc\" DevicePath \"\"" Nov 22 11:00:48 crc kubenswrapper[4926]: I1122 11:00:48.616591 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/cb1c4cdf-86c1-4770-b406-87cb1ea92552-rabbitmq-erlang-cookie" (OuterVolumeSpecName: "rabbitmq-erlang-cookie") pod "cb1c4cdf-86c1-4770-b406-87cb1ea92552" (UID: "cb1c4cdf-86c1-4770-b406-87cb1ea92552"). InnerVolumeSpecName "rabbitmq-erlang-cookie". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 11:00:48 crc kubenswrapper[4926]: I1122 11:00:48.617347 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/cb1c4cdf-86c1-4770-b406-87cb1ea92552-plugins-conf" (OuterVolumeSpecName: "plugins-conf") pod "cb1c4cdf-86c1-4770-b406-87cb1ea92552" (UID: "cb1c4cdf-86c1-4770-b406-87cb1ea92552"). InnerVolumeSpecName "plugins-conf". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 11:00:48 crc kubenswrapper[4926]: I1122 11:00:48.621784 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage02-crc" (OuterVolumeSpecName: "persistence") pod "cb1c4cdf-86c1-4770-b406-87cb1ea92552" (UID: "cb1c4cdf-86c1-4770-b406-87cb1ea92552"). InnerVolumeSpecName "local-storage02-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 22 11:00:48 crc kubenswrapper[4926]: I1122 11:00:48.621787 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/downward-api/cb1c4cdf-86c1-4770-b406-87cb1ea92552-pod-info" (OuterVolumeSpecName: "pod-info") pod "cb1c4cdf-86c1-4770-b406-87cb1ea92552" (UID: "cb1c4cdf-86c1-4770-b406-87cb1ea92552"). InnerVolumeSpecName "pod-info". PluginName "kubernetes.io/downward-api", VolumeGidValue "" Nov 22 11:00:48 crc kubenswrapper[4926]: I1122 11:00:48.622396 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cb1c4cdf-86c1-4770-b406-87cb1ea92552-rabbitmq-tls" (OuterVolumeSpecName: "rabbitmq-tls") pod "cb1c4cdf-86c1-4770-b406-87cb1ea92552" (UID: "cb1c4cdf-86c1-4770-b406-87cb1ea92552"). InnerVolumeSpecName "rabbitmq-tls". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 11:00:48 crc kubenswrapper[4926]: I1122 11:00:48.637637 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cb1c4cdf-86c1-4770-b406-87cb1ea92552-erlang-cookie-secret" (OuterVolumeSpecName: "erlang-cookie-secret") pod "cb1c4cdf-86c1-4770-b406-87cb1ea92552" (UID: "cb1c4cdf-86c1-4770-b406-87cb1ea92552"). InnerVolumeSpecName "erlang-cookie-secret". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 11:00:48 crc kubenswrapper[4926]: I1122 11:00:48.649965 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cb1c4cdf-86c1-4770-b406-87cb1ea92552-kube-api-access-8n82q" (OuterVolumeSpecName: "kube-api-access-8n82q") pod "cb1c4cdf-86c1-4770-b406-87cb1ea92552" (UID: "cb1c4cdf-86c1-4770-b406-87cb1ea92552"). InnerVolumeSpecName "kube-api-access-8n82q". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 11:00:48 crc kubenswrapper[4926]: I1122 11:00:48.673479 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/cb1c4cdf-86c1-4770-b406-87cb1ea92552-config-data" (OuterVolumeSpecName: "config-data") pod "cb1c4cdf-86c1-4770-b406-87cb1ea92552" (UID: "cb1c4cdf-86c1-4770-b406-87cb1ea92552"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 11:00:48 crc kubenswrapper[4926]: I1122 11:00:48.697850 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/cb1c4cdf-86c1-4770-b406-87cb1ea92552-server-conf" (OuterVolumeSpecName: "server-conf") pod "cb1c4cdf-86c1-4770-b406-87cb1ea92552" (UID: "cb1c4cdf-86c1-4770-b406-87cb1ea92552"). InnerVolumeSpecName "server-conf". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 11:00:48 crc kubenswrapper[4926]: I1122 11:00:48.719479 4926 reconciler_common.go:293] "Volume detached for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/cb1c4cdf-86c1-4770-b406-87cb1ea92552-pod-info\") on node \"crc\" DevicePath \"\"" Nov 22 11:00:48 crc kubenswrapper[4926]: I1122 11:00:48.719524 4926 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") on node \"crc\" " Nov 22 11:00:48 crc kubenswrapper[4926]: I1122 11:00:48.719535 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8n82q\" (UniqueName: \"kubernetes.io/projected/cb1c4cdf-86c1-4770-b406-87cb1ea92552-kube-api-access-8n82q\") on node \"crc\" DevicePath \"\"" Nov 22 11:00:48 crc kubenswrapper[4926]: I1122 11:00:48.719544 4926 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/cb1c4cdf-86c1-4770-b406-87cb1ea92552-rabbitmq-tls\") on node \"crc\" DevicePath \"\"" Nov 22 11:00:48 crc kubenswrapper[4926]: I1122 11:00:48.719553 4926 reconciler_common.go:293] "Volume detached for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/cb1c4cdf-86c1-4770-b406-87cb1ea92552-plugins-conf\") on node \"crc\" DevicePath \"\"" Nov 22 11:00:48 crc kubenswrapper[4926]: I1122 11:00:48.719561 4926 reconciler_common.go:293] "Volume detached for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/cb1c4cdf-86c1-4770-b406-87cb1ea92552-server-conf\") on node \"crc\" DevicePath \"\"" Nov 22 11:00:48 crc kubenswrapper[4926]: I1122 11:00:48.719569 4926 reconciler_common.go:293] "Volume detached for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/cb1c4cdf-86c1-4770-b406-87cb1ea92552-erlang-cookie-secret\") on node \"crc\" DevicePath \"\"" Nov 22 11:00:48 crc kubenswrapper[4926]: I1122 11:00:48.719577 4926 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/cb1c4cdf-86c1-4770-b406-87cb1ea92552-config-data\") on node \"crc\" DevicePath \"\"" Nov 22 11:00:48 crc kubenswrapper[4926]: I1122 11:00:48.719585 4926 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/cb1c4cdf-86c1-4770-b406-87cb1ea92552-rabbitmq-erlang-cookie\") on node \"crc\" DevicePath \"\"" Nov 22 11:00:48 crc kubenswrapper[4926]: I1122 11:00:48.749360 4926 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage02-crc" (UniqueName: "kubernetes.io/local-volume/local-storage02-crc") on node "crc" Nov 22 11:00:48 crc kubenswrapper[4926]: I1122 11:00:48.768745 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cb1c4cdf-86c1-4770-b406-87cb1ea92552-rabbitmq-confd" (OuterVolumeSpecName: "rabbitmq-confd") pod "cb1c4cdf-86c1-4770-b406-87cb1ea92552" (UID: "cb1c4cdf-86c1-4770-b406-87cb1ea92552"). InnerVolumeSpecName "rabbitmq-confd". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 11:00:48 crc kubenswrapper[4926]: I1122 11:00:48.821275 4926 reconciler_common.go:293] "Volume detached for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") on node \"crc\" DevicePath \"\"" Nov 22 11:00:48 crc kubenswrapper[4926]: I1122 11:00:48.821312 4926 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/cb1c4cdf-86c1-4770-b406-87cb1ea92552-rabbitmq-confd\") on node \"crc\" DevicePath \"\"" Nov 22 11:00:49 crc kubenswrapper[4926]: I1122 11:00:49.062260 4926 generic.go:334] "Generic (PLEG): container finished" podID="cb1c4cdf-86c1-4770-b406-87cb1ea92552" containerID="84686b899f9045caf92860c610c762fee79af52ccc158673bbe452783aa18796" exitCode=0 Nov 22 11:00:49 crc kubenswrapper[4926]: I1122 11:00:49.062355 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0" Nov 22 11:00:49 crc kubenswrapper[4926]: I1122 11:00:49.062336 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"cb1c4cdf-86c1-4770-b406-87cb1ea92552","Type":"ContainerDied","Data":"84686b899f9045caf92860c610c762fee79af52ccc158673bbe452783aa18796"} Nov 22 11:00:49 crc kubenswrapper[4926]: I1122 11:00:49.062677 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"cb1c4cdf-86c1-4770-b406-87cb1ea92552","Type":"ContainerDied","Data":"0b419579f8605414e46f947c15d8d5648d45958e9445d4c27227b6294a4dbf1a"} Nov 22 11:00:49 crc kubenswrapper[4926]: I1122 11:00:49.062698 4926 scope.go:117] "RemoveContainer" containerID="84686b899f9045caf92860c610c762fee79af52ccc158673bbe452783aa18796" Nov 22 11:00:49 crc kubenswrapper[4926]: I1122 11:00:49.096206 4926 scope.go:117] "RemoveContainer" containerID="6afc1ba4f910d558dd79a31516f29391702084f519cdd3bc3789a3258e780b82" Nov 22 11:00:49 crc kubenswrapper[4926]: I1122 11:00:49.145847 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-server-0"] Nov 22 11:00:49 crc kubenswrapper[4926]: I1122 11:00:49.173074 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/rabbitmq-server-0"] Nov 22 11:00:49 crc kubenswrapper[4926]: I1122 11:00:49.183457 4926 scope.go:117] "RemoveContainer" containerID="84686b899f9045caf92860c610c762fee79af52ccc158673bbe452783aa18796" Nov 22 11:00:49 crc kubenswrapper[4926]: E1122 11:00:49.185504 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"84686b899f9045caf92860c610c762fee79af52ccc158673bbe452783aa18796\": container with ID starting with 84686b899f9045caf92860c610c762fee79af52ccc158673bbe452783aa18796 not found: ID does not exist" containerID="84686b899f9045caf92860c610c762fee79af52ccc158673bbe452783aa18796" Nov 22 11:00:49 crc kubenswrapper[4926]: I1122 11:00:49.185553 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"84686b899f9045caf92860c610c762fee79af52ccc158673bbe452783aa18796"} err="failed to get container status \"84686b899f9045caf92860c610c762fee79af52ccc158673bbe452783aa18796\": rpc error: code = NotFound desc = could not find container \"84686b899f9045caf92860c610c762fee79af52ccc158673bbe452783aa18796\": container with ID starting with 84686b899f9045caf92860c610c762fee79af52ccc158673bbe452783aa18796 not found: ID does not exist" Nov 22 11:00:49 crc kubenswrapper[4926]: I1122 
11:00:49.185579 4926 scope.go:117] "RemoveContainer" containerID="6afc1ba4f910d558dd79a31516f29391702084f519cdd3bc3789a3258e780b82" Nov 22 11:00:49 crc kubenswrapper[4926]: E1122 11:00:49.186045 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6afc1ba4f910d558dd79a31516f29391702084f519cdd3bc3789a3258e780b82\": container with ID starting with 6afc1ba4f910d558dd79a31516f29391702084f519cdd3bc3789a3258e780b82 not found: ID does not exist" containerID="6afc1ba4f910d558dd79a31516f29391702084f519cdd3bc3789a3258e780b82" Nov 22 11:00:49 crc kubenswrapper[4926]: I1122 11:00:49.186069 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6afc1ba4f910d558dd79a31516f29391702084f519cdd3bc3789a3258e780b82"} err="failed to get container status \"6afc1ba4f910d558dd79a31516f29391702084f519cdd3bc3789a3258e780b82\": rpc error: code = NotFound desc = could not find container \"6afc1ba4f910d558dd79a31516f29391702084f519cdd3bc3789a3258e780b82\": container with ID starting with 6afc1ba4f910d558dd79a31516f29391702084f519cdd3bc3789a3258e780b82 not found: ID does not exist" Nov 22 11:00:49 crc kubenswrapper[4926]: I1122 11:00:49.192615 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-server-0"] Nov 22 11:00:49 crc kubenswrapper[4926]: E1122 11:00:49.193027 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cb1c4cdf-86c1-4770-b406-87cb1ea92552" containerName="rabbitmq" Nov 22 11:00:49 crc kubenswrapper[4926]: I1122 11:00:49.193044 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="cb1c4cdf-86c1-4770-b406-87cb1ea92552" containerName="rabbitmq" Nov 22 11:00:49 crc kubenswrapper[4926]: E1122 11:00:49.193095 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cb1c4cdf-86c1-4770-b406-87cb1ea92552" containerName="setup-container" Nov 22 11:00:49 crc kubenswrapper[4926]: I1122 11:00:49.193103 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="cb1c4cdf-86c1-4770-b406-87cb1ea92552" containerName="setup-container" Nov 22 11:00:49 crc kubenswrapper[4926]: I1122 11:00:49.193278 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="cb1c4cdf-86c1-4770-b406-87cb1ea92552" containerName="rabbitmq" Nov 22 11:00:49 crc kubenswrapper[4926]: I1122 11:00:49.194316 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-server-0" Nov 22 11:00:49 crc kubenswrapper[4926]: I1122 11:00:49.196234 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-erlang-cookie" Nov 22 11:00:49 crc kubenswrapper[4926]: I1122 11:00:49.196427 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-config-data" Nov 22 11:00:49 crc kubenswrapper[4926]: I1122 11:00:49.196822 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-default-user" Nov 22 11:00:49 crc kubenswrapper[4926]: I1122 11:00:49.197111 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-server-conf" Nov 22 11:00:49 crc kubenswrapper[4926]: I1122 11:00:49.197262 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-svc" Nov 22 11:00:49 crc kubenswrapper[4926]: I1122 11:00:49.197434 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-plugins-conf" Nov 22 11:00:49 crc kubenswrapper[4926]: I1122 11:00:49.197710 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-server-dockercfg-r895p" Nov 22 11:00:49 crc kubenswrapper[4926]: I1122 11:00:49.207550 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Nov 22 11:00:49 crc kubenswrapper[4926]: I1122 11:00:49.330126 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/9bcfa04c-3c9e-47a5-946e-d7c42d3cefda-pod-info\") pod \"rabbitmq-server-0\" (UID: \"9bcfa04c-3c9e-47a5-946e-d7c42d3cefda\") " pod="openstack/rabbitmq-server-0" Nov 22 11:00:49 crc kubenswrapper[4926]: I1122 11:00:49.330172 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/9bcfa04c-3c9e-47a5-946e-d7c42d3cefda-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"9bcfa04c-3c9e-47a5-946e-d7c42d3cefda\") " pod="openstack/rabbitmq-server-0" Nov 22 11:00:49 crc kubenswrapper[4926]: I1122 11:00:49.330250 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/9bcfa04c-3c9e-47a5-946e-d7c42d3cefda-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"9bcfa04c-3c9e-47a5-946e-d7c42d3cefda\") " pod="openstack/rabbitmq-server-0" Nov 22 11:00:49 crc kubenswrapper[4926]: I1122 11:00:49.330268 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/9bcfa04c-3c9e-47a5-946e-d7c42d3cefda-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"9bcfa04c-3c9e-47a5-946e-d7c42d3cefda\") " pod="openstack/rabbitmq-server-0" Nov 22 11:00:49 crc kubenswrapper[4926]: I1122 11:00:49.330288 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/9bcfa04c-3c9e-47a5-946e-d7c42d3cefda-server-conf\") pod \"rabbitmq-server-0\" (UID: \"9bcfa04c-3c9e-47a5-946e-d7c42d3cefda\") " pod="openstack/rabbitmq-server-0" Nov 22 11:00:49 crc kubenswrapper[4926]: I1122 11:00:49.330326 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: 
\"kubernetes.io/configmap/9bcfa04c-3c9e-47a5-946e-d7c42d3cefda-config-data\") pod \"rabbitmq-server-0\" (UID: \"9bcfa04c-3c9e-47a5-946e-d7c42d3cefda\") " pod="openstack/rabbitmq-server-0" Nov 22 11:00:49 crc kubenswrapper[4926]: I1122 11:00:49.330346 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/9bcfa04c-3c9e-47a5-946e-d7c42d3cefda-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"9bcfa04c-3c9e-47a5-946e-d7c42d3cefda\") " pod="openstack/rabbitmq-server-0" Nov 22 11:00:49 crc kubenswrapper[4926]: I1122 11:00:49.330380 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/9bcfa04c-3c9e-47a5-946e-d7c42d3cefda-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"9bcfa04c-3c9e-47a5-946e-d7c42d3cefda\") " pod="openstack/rabbitmq-server-0" Nov 22 11:00:49 crc kubenswrapper[4926]: I1122 11:00:49.330394 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hxpnr\" (UniqueName: \"kubernetes.io/projected/9bcfa04c-3c9e-47a5-946e-d7c42d3cefda-kube-api-access-hxpnr\") pod \"rabbitmq-server-0\" (UID: \"9bcfa04c-3c9e-47a5-946e-d7c42d3cefda\") " pod="openstack/rabbitmq-server-0" Nov 22 11:00:49 crc kubenswrapper[4926]: I1122 11:00:49.330423 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"rabbitmq-server-0\" (UID: \"9bcfa04c-3c9e-47a5-946e-d7c42d3cefda\") " pod="openstack/rabbitmq-server-0" Nov 22 11:00:49 crc kubenswrapper[4926]: I1122 11:00:49.330445 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/9bcfa04c-3c9e-47a5-946e-d7c42d3cefda-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"9bcfa04c-3c9e-47a5-946e-d7c42d3cefda\") " pod="openstack/rabbitmq-server-0" Nov 22 11:00:49 crc kubenswrapper[4926]: I1122 11:00:49.432856 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/9bcfa04c-3c9e-47a5-946e-d7c42d3cefda-config-data\") pod \"rabbitmq-server-0\" (UID: \"9bcfa04c-3c9e-47a5-946e-d7c42d3cefda\") " pod="openstack/rabbitmq-server-0" Nov 22 11:00:49 crc kubenswrapper[4926]: I1122 11:00:49.432950 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/9bcfa04c-3c9e-47a5-946e-d7c42d3cefda-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"9bcfa04c-3c9e-47a5-946e-d7c42d3cefda\") " pod="openstack/rabbitmq-server-0" Nov 22 11:00:49 crc kubenswrapper[4926]: I1122 11:00:49.432993 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hxpnr\" (UniqueName: \"kubernetes.io/projected/9bcfa04c-3c9e-47a5-946e-d7c42d3cefda-kube-api-access-hxpnr\") pod \"rabbitmq-server-0\" (UID: \"9bcfa04c-3c9e-47a5-946e-d7c42d3cefda\") " pod="openstack/rabbitmq-server-0" Nov 22 11:00:49 crc kubenswrapper[4926]: I1122 11:00:49.433014 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/9bcfa04c-3c9e-47a5-946e-d7c42d3cefda-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"9bcfa04c-3c9e-47a5-946e-d7c42d3cefda\") " 
pod="openstack/rabbitmq-server-0" Nov 22 11:00:49 crc kubenswrapper[4926]: I1122 11:00:49.433047 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"rabbitmq-server-0\" (UID: \"9bcfa04c-3c9e-47a5-946e-d7c42d3cefda\") " pod="openstack/rabbitmq-server-0" Nov 22 11:00:49 crc kubenswrapper[4926]: I1122 11:00:49.433068 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/9bcfa04c-3c9e-47a5-946e-d7c42d3cefda-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"9bcfa04c-3c9e-47a5-946e-d7c42d3cefda\") " pod="openstack/rabbitmq-server-0" Nov 22 11:00:49 crc kubenswrapper[4926]: I1122 11:00:49.433107 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/9bcfa04c-3c9e-47a5-946e-d7c42d3cefda-pod-info\") pod \"rabbitmq-server-0\" (UID: \"9bcfa04c-3c9e-47a5-946e-d7c42d3cefda\") " pod="openstack/rabbitmq-server-0" Nov 22 11:00:49 crc kubenswrapper[4926]: I1122 11:00:49.433127 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/9bcfa04c-3c9e-47a5-946e-d7c42d3cefda-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"9bcfa04c-3c9e-47a5-946e-d7c42d3cefda\") " pod="openstack/rabbitmq-server-0" Nov 22 11:00:49 crc kubenswrapper[4926]: I1122 11:00:49.433181 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/9bcfa04c-3c9e-47a5-946e-d7c42d3cefda-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"9bcfa04c-3c9e-47a5-946e-d7c42d3cefda\") " pod="openstack/rabbitmq-server-0" Nov 22 11:00:49 crc kubenswrapper[4926]: I1122 11:00:49.433197 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/9bcfa04c-3c9e-47a5-946e-d7c42d3cefda-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"9bcfa04c-3c9e-47a5-946e-d7c42d3cefda\") " pod="openstack/rabbitmq-server-0" Nov 22 11:00:49 crc kubenswrapper[4926]: I1122 11:00:49.433216 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/9bcfa04c-3c9e-47a5-946e-d7c42d3cefda-server-conf\") pod \"rabbitmq-server-0\" (UID: \"9bcfa04c-3c9e-47a5-946e-d7c42d3cefda\") " pod="openstack/rabbitmq-server-0" Nov 22 11:00:49 crc kubenswrapper[4926]: I1122 11:00:49.433961 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/9bcfa04c-3c9e-47a5-946e-d7c42d3cefda-config-data\") pod \"rabbitmq-server-0\" (UID: \"9bcfa04c-3c9e-47a5-946e-d7c42d3cefda\") " pod="openstack/rabbitmq-server-0" Nov 22 11:00:49 crc kubenswrapper[4926]: I1122 11:00:49.437782 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/9bcfa04c-3c9e-47a5-946e-d7c42d3cefda-server-conf\") pod \"rabbitmq-server-0\" (UID: \"9bcfa04c-3c9e-47a5-946e-d7c42d3cefda\") " pod="openstack/rabbitmq-server-0" Nov 22 11:00:49 crc kubenswrapper[4926]: I1122 11:00:49.438080 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: 
\"kubernetes.io/empty-dir/9bcfa04c-3c9e-47a5-946e-d7c42d3cefda-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"9bcfa04c-3c9e-47a5-946e-d7c42d3cefda\") " pod="openstack/rabbitmq-server-0" Nov 22 11:00:49 crc kubenswrapper[4926]: I1122 11:00:49.438316 4926 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"rabbitmq-server-0\" (UID: \"9bcfa04c-3c9e-47a5-946e-d7c42d3cefda\") device mount path \"/mnt/openstack/pv02\"" pod="openstack/rabbitmq-server-0" Nov 22 11:00:49 crc kubenswrapper[4926]: I1122 11:00:49.438325 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/9bcfa04c-3c9e-47a5-946e-d7c42d3cefda-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"9bcfa04c-3c9e-47a5-946e-d7c42d3cefda\") " pod="openstack/rabbitmq-server-0" Nov 22 11:00:49 crc kubenswrapper[4926]: I1122 11:00:49.441005 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/9bcfa04c-3c9e-47a5-946e-d7c42d3cefda-pod-info\") pod \"rabbitmq-server-0\" (UID: \"9bcfa04c-3c9e-47a5-946e-d7c42d3cefda\") " pod="openstack/rabbitmq-server-0" Nov 22 11:00:49 crc kubenswrapper[4926]: I1122 11:00:49.451631 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/9bcfa04c-3c9e-47a5-946e-d7c42d3cefda-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"9bcfa04c-3c9e-47a5-946e-d7c42d3cefda\") " pod="openstack/rabbitmq-server-0" Nov 22 11:00:49 crc kubenswrapper[4926]: I1122 11:00:49.453307 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/9bcfa04c-3c9e-47a5-946e-d7c42d3cefda-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"9bcfa04c-3c9e-47a5-946e-d7c42d3cefda\") " pod="openstack/rabbitmq-server-0" Nov 22 11:00:49 crc kubenswrapper[4926]: I1122 11:00:49.458466 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/9bcfa04c-3c9e-47a5-946e-d7c42d3cefda-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"9bcfa04c-3c9e-47a5-946e-d7c42d3cefda\") " pod="openstack/rabbitmq-server-0" Nov 22 11:00:49 crc kubenswrapper[4926]: I1122 11:00:49.493417 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/9bcfa04c-3c9e-47a5-946e-d7c42d3cefda-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"9bcfa04c-3c9e-47a5-946e-d7c42d3cefda\") " pod="openstack/rabbitmq-server-0" Nov 22 11:00:49 crc kubenswrapper[4926]: I1122 11:00:49.493561 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hxpnr\" (UniqueName: \"kubernetes.io/projected/9bcfa04c-3c9e-47a5-946e-d7c42d3cefda-kube-api-access-hxpnr\") pod \"rabbitmq-server-0\" (UID: \"9bcfa04c-3c9e-47a5-946e-d7c42d3cefda\") " pod="openstack/rabbitmq-server-0" Nov 22 11:00:49 crc kubenswrapper[4926]: I1122 11:00:49.531304 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"rabbitmq-server-0\" (UID: \"9bcfa04c-3c9e-47a5-946e-d7c42d3cefda\") " pod="openstack/rabbitmq-server-0" Nov 22 11:00:49 crc kubenswrapper[4926]: I1122 11:00:49.594665 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-server-0" Nov 22 11:00:49 crc kubenswrapper[4926]: I1122 11:00:49.620025 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Nov 22 11:00:49 crc kubenswrapper[4926]: I1122 11:00:49.738134 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-s476h\" (UniqueName: \"kubernetes.io/projected/12c9d69e-19d9-4c70-a080-c3d3fe0c33ce-kube-api-access-s476h\") pod \"12c9d69e-19d9-4c70-a080-c3d3fe0c33ce\" (UID: \"12c9d69e-19d9-4c70-a080-c3d3fe0c33ce\") " Nov 22 11:00:49 crc kubenswrapper[4926]: I1122 11:00:49.738216 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/12c9d69e-19d9-4c70-a080-c3d3fe0c33ce-rabbitmq-tls\") pod \"12c9d69e-19d9-4c70-a080-c3d3fe0c33ce\" (UID: \"12c9d69e-19d9-4c70-a080-c3d3fe0c33ce\") " Nov 22 11:00:49 crc kubenswrapper[4926]: I1122 11:00:49.738263 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/12c9d69e-19d9-4c70-a080-c3d3fe0c33ce-rabbitmq-erlang-cookie\") pod \"12c9d69e-19d9-4c70-a080-c3d3fe0c33ce\" (UID: \"12c9d69e-19d9-4c70-a080-c3d3fe0c33ce\") " Nov 22 11:00:49 crc kubenswrapper[4926]: I1122 11:00:49.738325 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/12c9d69e-19d9-4c70-a080-c3d3fe0c33ce-plugins-conf\") pod \"12c9d69e-19d9-4c70-a080-c3d3fe0c33ce\" (UID: \"12c9d69e-19d9-4c70-a080-c3d3fe0c33ce\") " Nov 22 11:00:49 crc kubenswrapper[4926]: I1122 11:00:49.738380 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/12c9d69e-19d9-4c70-a080-c3d3fe0c33ce-rabbitmq-confd\") pod \"12c9d69e-19d9-4c70-a080-c3d3fe0c33ce\" (UID: \"12c9d69e-19d9-4c70-a080-c3d3fe0c33ce\") " Nov 22 11:00:49 crc kubenswrapper[4926]: I1122 11:00:49.738432 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/12c9d69e-19d9-4c70-a080-c3d3fe0c33ce-erlang-cookie-secret\") pod \"12c9d69e-19d9-4c70-a080-c3d3fe0c33ce\" (UID: \"12c9d69e-19d9-4c70-a080-c3d3fe0c33ce\") " Nov 22 11:00:49 crc kubenswrapper[4926]: I1122 11:00:49.738466 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/12c9d69e-19d9-4c70-a080-c3d3fe0c33ce-server-conf\") pod \"12c9d69e-19d9-4c70-a080-c3d3fe0c33ce\" (UID: \"12c9d69e-19d9-4c70-a080-c3d3fe0c33ce\") " Nov 22 11:00:49 crc kubenswrapper[4926]: I1122 11:00:49.738492 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/12c9d69e-19d9-4c70-a080-c3d3fe0c33ce-rabbitmq-plugins\") pod \"12c9d69e-19d9-4c70-a080-c3d3fe0c33ce\" (UID: \"12c9d69e-19d9-4c70-a080-c3d3fe0c33ce\") " Nov 22 11:00:49 crc kubenswrapper[4926]: I1122 11:00:49.738546 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/12c9d69e-19d9-4c70-a080-c3d3fe0c33ce-config-data\") pod \"12c9d69e-19d9-4c70-a080-c3d3fe0c33ce\" (UID: \"12c9d69e-19d9-4c70-a080-c3d3fe0c33ce\") " Nov 22 11:00:49 crc kubenswrapper[4926]: I1122 11:00:49.738567 4926 reconciler_common.go:159] 
"operationExecutor.UnmountVolume started for volume \"persistence\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"12c9d69e-19d9-4c70-a080-c3d3fe0c33ce\" (UID: \"12c9d69e-19d9-4c70-a080-c3d3fe0c33ce\") " Nov 22 11:00:49 crc kubenswrapper[4926]: I1122 11:00:49.738616 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/12c9d69e-19d9-4c70-a080-c3d3fe0c33ce-pod-info\") pod \"12c9d69e-19d9-4c70-a080-c3d3fe0c33ce\" (UID: \"12c9d69e-19d9-4c70-a080-c3d3fe0c33ce\") " Nov 22 11:00:49 crc kubenswrapper[4926]: I1122 11:00:49.740154 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/12c9d69e-19d9-4c70-a080-c3d3fe0c33ce-rabbitmq-erlang-cookie" (OuterVolumeSpecName: "rabbitmq-erlang-cookie") pod "12c9d69e-19d9-4c70-a080-c3d3fe0c33ce" (UID: "12c9d69e-19d9-4c70-a080-c3d3fe0c33ce"). InnerVolumeSpecName "rabbitmq-erlang-cookie". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 11:00:49 crc kubenswrapper[4926]: I1122 11:00:49.742336 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/12c9d69e-19d9-4c70-a080-c3d3fe0c33ce-plugins-conf" (OuterVolumeSpecName: "plugins-conf") pod "12c9d69e-19d9-4c70-a080-c3d3fe0c33ce" (UID: "12c9d69e-19d9-4c70-a080-c3d3fe0c33ce"). InnerVolumeSpecName "plugins-conf". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 11:00:49 crc kubenswrapper[4926]: I1122 11:00:49.744704 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/12c9d69e-19d9-4c70-a080-c3d3fe0c33ce-rabbitmq-plugins" (OuterVolumeSpecName: "rabbitmq-plugins") pod "12c9d69e-19d9-4c70-a080-c3d3fe0c33ce" (UID: "12c9d69e-19d9-4c70-a080-c3d3fe0c33ce"). InnerVolumeSpecName "rabbitmq-plugins". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 11:00:49 crc kubenswrapper[4926]: I1122 11:00:49.754875 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/12c9d69e-19d9-4c70-a080-c3d3fe0c33ce-kube-api-access-s476h" (OuterVolumeSpecName: "kube-api-access-s476h") pod "12c9d69e-19d9-4c70-a080-c3d3fe0c33ce" (UID: "12c9d69e-19d9-4c70-a080-c3d3fe0c33ce"). InnerVolumeSpecName "kube-api-access-s476h". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 11:00:49 crc kubenswrapper[4926]: I1122 11:00:49.768851 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage11-crc" (OuterVolumeSpecName: "persistence") pod "12c9d69e-19d9-4c70-a080-c3d3fe0c33ce" (UID: "12c9d69e-19d9-4c70-a080-c3d3fe0c33ce"). InnerVolumeSpecName "local-storage11-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 22 11:00:49 crc kubenswrapper[4926]: I1122 11:00:49.775998 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/12c9d69e-19d9-4c70-a080-c3d3fe0c33ce-rabbitmq-tls" (OuterVolumeSpecName: "rabbitmq-tls") pod "12c9d69e-19d9-4c70-a080-c3d3fe0c33ce" (UID: "12c9d69e-19d9-4c70-a080-c3d3fe0c33ce"). InnerVolumeSpecName "rabbitmq-tls". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 11:00:49 crc kubenswrapper[4926]: I1122 11:00:49.778210 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/downward-api/12c9d69e-19d9-4c70-a080-c3d3fe0c33ce-pod-info" (OuterVolumeSpecName: "pod-info") pod "12c9d69e-19d9-4c70-a080-c3d3fe0c33ce" (UID: "12c9d69e-19d9-4c70-a080-c3d3fe0c33ce"). InnerVolumeSpecName "pod-info". PluginName "kubernetes.io/downward-api", VolumeGidValue "" Nov 22 11:00:49 crc kubenswrapper[4926]: I1122 11:00:49.780187 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/12c9d69e-19d9-4c70-a080-c3d3fe0c33ce-erlang-cookie-secret" (OuterVolumeSpecName: "erlang-cookie-secret") pod "12c9d69e-19d9-4c70-a080-c3d3fe0c33ce" (UID: "12c9d69e-19d9-4c70-a080-c3d3fe0c33ce"). InnerVolumeSpecName "erlang-cookie-secret". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 11:00:49 crc kubenswrapper[4926]: I1122 11:00:49.781885 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/12c9d69e-19d9-4c70-a080-c3d3fe0c33ce-config-data" (OuterVolumeSpecName: "config-data") pod "12c9d69e-19d9-4c70-a080-c3d3fe0c33ce" (UID: "12c9d69e-19d9-4c70-a080-c3d3fe0c33ce"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 11:00:49 crc kubenswrapper[4926]: I1122 11:00:49.816076 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/12c9d69e-19d9-4c70-a080-c3d3fe0c33ce-server-conf" (OuterVolumeSpecName: "server-conf") pod "12c9d69e-19d9-4c70-a080-c3d3fe0c33ce" (UID: "12c9d69e-19d9-4c70-a080-c3d3fe0c33ce"). InnerVolumeSpecName "server-conf". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 11:00:49 crc kubenswrapper[4926]: I1122 11:00:49.845330 4926 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/12c9d69e-19d9-4c70-a080-c3d3fe0c33ce-rabbitmq-plugins\") on node \"crc\" DevicePath \"\"" Nov 22 11:00:49 crc kubenswrapper[4926]: I1122 11:00:49.845369 4926 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/12c9d69e-19d9-4c70-a080-c3d3fe0c33ce-config-data\") on node \"crc\" DevicePath \"\"" Nov 22 11:00:49 crc kubenswrapper[4926]: I1122 11:00:49.845407 4926 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") on node \"crc\" " Nov 22 11:00:49 crc kubenswrapper[4926]: I1122 11:00:49.845419 4926 reconciler_common.go:293] "Volume detached for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/12c9d69e-19d9-4c70-a080-c3d3fe0c33ce-pod-info\") on node \"crc\" DevicePath \"\"" Nov 22 11:00:49 crc kubenswrapper[4926]: I1122 11:00:49.845432 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-s476h\" (UniqueName: \"kubernetes.io/projected/12c9d69e-19d9-4c70-a080-c3d3fe0c33ce-kube-api-access-s476h\") on node \"crc\" DevicePath \"\"" Nov 22 11:00:49 crc kubenswrapper[4926]: I1122 11:00:49.845446 4926 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/12c9d69e-19d9-4c70-a080-c3d3fe0c33ce-rabbitmq-tls\") on node \"crc\" DevicePath \"\"" Nov 22 11:00:49 crc kubenswrapper[4926]: I1122 11:00:49.845458 4926 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-erlang-cookie\" 
(UniqueName: \"kubernetes.io/empty-dir/12c9d69e-19d9-4c70-a080-c3d3fe0c33ce-rabbitmq-erlang-cookie\") on node \"crc\" DevicePath \"\"" Nov 22 11:00:49 crc kubenswrapper[4926]: I1122 11:00:49.845469 4926 reconciler_common.go:293] "Volume detached for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/12c9d69e-19d9-4c70-a080-c3d3fe0c33ce-plugins-conf\") on node \"crc\" DevicePath \"\"" Nov 22 11:00:49 crc kubenswrapper[4926]: I1122 11:00:49.845480 4926 reconciler_common.go:293] "Volume detached for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/12c9d69e-19d9-4c70-a080-c3d3fe0c33ce-erlang-cookie-secret\") on node \"crc\" DevicePath \"\"" Nov 22 11:00:49 crc kubenswrapper[4926]: I1122 11:00:49.845491 4926 reconciler_common.go:293] "Volume detached for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/12c9d69e-19d9-4c70-a080-c3d3fe0c33ce-server-conf\") on node \"crc\" DevicePath \"\"" Nov 22 11:00:49 crc kubenswrapper[4926]: I1122 11:00:49.865665 4926 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage11-crc" (UniqueName: "kubernetes.io/local-volume/local-storage11-crc") on node "crc" Nov 22 11:00:49 crc kubenswrapper[4926]: I1122 11:00:49.877183 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/12c9d69e-19d9-4c70-a080-c3d3fe0c33ce-rabbitmq-confd" (OuterVolumeSpecName: "rabbitmq-confd") pod "12c9d69e-19d9-4c70-a080-c3d3fe0c33ce" (UID: "12c9d69e-19d9-4c70-a080-c3d3fe0c33ce"). InnerVolumeSpecName "rabbitmq-confd". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 11:00:49 crc kubenswrapper[4926]: I1122 11:00:49.947676 4926 reconciler_common.go:293] "Volume detached for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") on node \"crc\" DevicePath \"\"" Nov 22 11:00:49 crc kubenswrapper[4926]: I1122 11:00:49.947718 4926 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/12c9d69e-19d9-4c70-a080-c3d3fe0c33ce-rabbitmq-confd\") on node \"crc\" DevicePath \"\"" Nov 22 11:00:50 crc kubenswrapper[4926]: I1122 11:00:50.086489 4926 generic.go:334] "Generic (PLEG): container finished" podID="12c9d69e-19d9-4c70-a080-c3d3fe0c33ce" containerID="5ed55c3bf3a170998c779817fd8b5d174706e30619b777cab3e7447475c38a06" exitCode=0 Nov 22 11:00:50 crc kubenswrapper[4926]: I1122 11:00:50.086534 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"12c9d69e-19d9-4c70-a080-c3d3fe0c33ce","Type":"ContainerDied","Data":"5ed55c3bf3a170998c779817fd8b5d174706e30619b777cab3e7447475c38a06"} Nov 22 11:00:50 crc kubenswrapper[4926]: I1122 11:00:50.086560 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"12c9d69e-19d9-4c70-a080-c3d3fe0c33ce","Type":"ContainerDied","Data":"bd0a4bd4b8f9eee343783720cbb0a43220efd18bffa8ac140ff949fcddfbada1"} Nov 22 11:00:50 crc kubenswrapper[4926]: I1122 11:00:50.086576 4926 scope.go:117] "RemoveContainer" containerID="5ed55c3bf3a170998c779817fd8b5d174706e30619b777cab3e7447475c38a06" Nov 22 11:00:50 crc kubenswrapper[4926]: I1122 11:00:50.086586 4926 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Nov 22 11:00:50 crc kubenswrapper[4926]: I1122 11:00:50.089492 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-5576978c7c-qvf5n"] Nov 22 11:00:50 crc kubenswrapper[4926]: E1122 11:00:50.089932 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="12c9d69e-19d9-4c70-a080-c3d3fe0c33ce" containerName="setup-container" Nov 22 11:00:50 crc kubenswrapper[4926]: I1122 11:00:50.089951 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="12c9d69e-19d9-4c70-a080-c3d3fe0c33ce" containerName="setup-container" Nov 22 11:00:50 crc kubenswrapper[4926]: E1122 11:00:50.089968 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="12c9d69e-19d9-4c70-a080-c3d3fe0c33ce" containerName="rabbitmq" Nov 22 11:00:50 crc kubenswrapper[4926]: I1122 11:00:50.089977 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="12c9d69e-19d9-4c70-a080-c3d3fe0c33ce" containerName="rabbitmq" Nov 22 11:00:50 crc kubenswrapper[4926]: I1122 11:00:50.090201 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="12c9d69e-19d9-4c70-a080-c3d3fe0c33ce" containerName="rabbitmq" Nov 22 11:00:50 crc kubenswrapper[4926]: I1122 11:00:50.101310 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5576978c7c-qvf5n" Nov 22 11:00:50 crc kubenswrapper[4926]: I1122 11:00:50.103794 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-edpm-ipam" Nov 22 11:00:50 crc kubenswrapper[4926]: I1122 11:00:50.126182 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5576978c7c-qvf5n"] Nov 22 11:00:50 crc kubenswrapper[4926]: I1122 11:00:50.166573 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Nov 22 11:00:50 crc kubenswrapper[4926]: I1122 11:00:50.189010 4926 scope.go:117] "RemoveContainer" containerID="2b35ef8decf1054a6f8673335a0252ebd211d1a9e3f03b61a81ed88a5631be85" Nov 22 11:00:50 crc kubenswrapper[4926]: I1122 11:00:50.255143 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/b63d0010-bc67-416c-bdbd-cd9dc80b6c9d-ovsdbserver-sb\") pod \"dnsmasq-dns-5576978c7c-qvf5n\" (UID: \"b63d0010-bc67-416c-bdbd-cd9dc80b6c9d\") " pod="openstack/dnsmasq-dns-5576978c7c-qvf5n" Nov 22 11:00:50 crc kubenswrapper[4926]: I1122 11:00:50.255202 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/b63d0010-bc67-416c-bdbd-cd9dc80b6c9d-dns-svc\") pod \"dnsmasq-dns-5576978c7c-qvf5n\" (UID: \"b63d0010-bc67-416c-bdbd-cd9dc80b6c9d\") " pod="openstack/dnsmasq-dns-5576978c7c-qvf5n" Nov 22 11:00:50 crc kubenswrapper[4926]: I1122 11:00:50.255271 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fhlpr\" (UniqueName: \"kubernetes.io/projected/b63d0010-bc67-416c-bdbd-cd9dc80b6c9d-kube-api-access-fhlpr\") pod \"dnsmasq-dns-5576978c7c-qvf5n\" (UID: \"b63d0010-bc67-416c-bdbd-cd9dc80b6c9d\") " pod="openstack/dnsmasq-dns-5576978c7c-qvf5n" Nov 22 11:00:50 crc kubenswrapper[4926]: I1122 11:00:50.255302 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b63d0010-bc67-416c-bdbd-cd9dc80b6c9d-config\") pod 
\"dnsmasq-dns-5576978c7c-qvf5n\" (UID: \"b63d0010-bc67-416c-bdbd-cd9dc80b6c9d\") " pod="openstack/dnsmasq-dns-5576978c7c-qvf5n" Nov 22 11:00:50 crc kubenswrapper[4926]: I1122 11:00:50.255333 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/b63d0010-bc67-416c-bdbd-cd9dc80b6c9d-ovsdbserver-nb\") pod \"dnsmasq-dns-5576978c7c-qvf5n\" (UID: \"b63d0010-bc67-416c-bdbd-cd9dc80b6c9d\") " pod="openstack/dnsmasq-dns-5576978c7c-qvf5n" Nov 22 11:00:50 crc kubenswrapper[4926]: I1122 11:00:50.255446 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/b63d0010-bc67-416c-bdbd-cd9dc80b6c9d-dns-swift-storage-0\") pod \"dnsmasq-dns-5576978c7c-qvf5n\" (UID: \"b63d0010-bc67-416c-bdbd-cd9dc80b6c9d\") " pod="openstack/dnsmasq-dns-5576978c7c-qvf5n" Nov 22 11:00:50 crc kubenswrapper[4926]: I1122 11:00:50.255482 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/b63d0010-bc67-416c-bdbd-cd9dc80b6c9d-openstack-edpm-ipam\") pod \"dnsmasq-dns-5576978c7c-qvf5n\" (UID: \"b63d0010-bc67-416c-bdbd-cd9dc80b6c9d\") " pod="openstack/dnsmasq-dns-5576978c7c-qvf5n" Nov 22 11:00:50 crc kubenswrapper[4926]: I1122 11:00:50.311692 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 22 11:00:50 crc kubenswrapper[4926]: I1122 11:00:50.326271 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 22 11:00:50 crc kubenswrapper[4926]: I1122 11:00:50.333330 4926 scope.go:117] "RemoveContainer" containerID="5ed55c3bf3a170998c779817fd8b5d174706e30619b777cab3e7447475c38a06" Nov 22 11:00:50 crc kubenswrapper[4926]: E1122 11:00:50.333791 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5ed55c3bf3a170998c779817fd8b5d174706e30619b777cab3e7447475c38a06\": container with ID starting with 5ed55c3bf3a170998c779817fd8b5d174706e30619b777cab3e7447475c38a06 not found: ID does not exist" containerID="5ed55c3bf3a170998c779817fd8b5d174706e30619b777cab3e7447475c38a06" Nov 22 11:00:50 crc kubenswrapper[4926]: I1122 11:00:50.333823 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5ed55c3bf3a170998c779817fd8b5d174706e30619b777cab3e7447475c38a06"} err="failed to get container status \"5ed55c3bf3a170998c779817fd8b5d174706e30619b777cab3e7447475c38a06\": rpc error: code = NotFound desc = could not find container \"5ed55c3bf3a170998c779817fd8b5d174706e30619b777cab3e7447475c38a06\": container with ID starting with 5ed55c3bf3a170998c779817fd8b5d174706e30619b777cab3e7447475c38a06 not found: ID does not exist" Nov 22 11:00:50 crc kubenswrapper[4926]: I1122 11:00:50.333855 4926 scope.go:117] "RemoveContainer" containerID="2b35ef8decf1054a6f8673335a0252ebd211d1a9e3f03b61a81ed88a5631be85" Nov 22 11:00:50 crc kubenswrapper[4926]: E1122 11:00:50.334153 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2b35ef8decf1054a6f8673335a0252ebd211d1a9e3f03b61a81ed88a5631be85\": container with ID starting with 2b35ef8decf1054a6f8673335a0252ebd211d1a9e3f03b61a81ed88a5631be85 not found: ID does not exist" 
containerID="2b35ef8decf1054a6f8673335a0252ebd211d1a9e3f03b61a81ed88a5631be85" Nov 22 11:00:50 crc kubenswrapper[4926]: I1122 11:00:50.334184 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2b35ef8decf1054a6f8673335a0252ebd211d1a9e3f03b61a81ed88a5631be85"} err="failed to get container status \"2b35ef8decf1054a6f8673335a0252ebd211d1a9e3f03b61a81ed88a5631be85\": rpc error: code = NotFound desc = could not find container \"2b35ef8decf1054a6f8673335a0252ebd211d1a9e3f03b61a81ed88a5631be85\": container with ID starting with 2b35ef8decf1054a6f8673335a0252ebd211d1a9e3f03b61a81ed88a5631be85 not found: ID does not exist" Nov 22 11:00:50 crc kubenswrapper[4926]: I1122 11:00:50.346023 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 22 11:00:50 crc kubenswrapper[4926]: I1122 11:00:50.347497 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Nov 22 11:00:50 crc kubenswrapper[4926]: I1122 11:00:50.352218 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-default-user" Nov 22 11:00:50 crc kubenswrapper[4926]: I1122 11:00:50.352386 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-erlang-cookie" Nov 22 11:00:50 crc kubenswrapper[4926]: I1122 11:00:50.352525 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-cell1-svc" Nov 22 11:00:50 crc kubenswrapper[4926]: I1122 11:00:50.354788 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-server-conf" Nov 22 11:00:50 crc kubenswrapper[4926]: I1122 11:00:50.355001 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-config-data" Nov 22 11:00:50 crc kubenswrapper[4926]: I1122 11:00:50.355104 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-plugins-conf" Nov 22 11:00:50 crc kubenswrapper[4926]: I1122 11:00:50.355225 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-server-dockercfg-qd6nx" Nov 22 11:00:50 crc kubenswrapper[4926]: I1122 11:00:50.356875 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/b63d0010-bc67-416c-bdbd-cd9dc80b6c9d-ovsdbserver-sb\") pod \"dnsmasq-dns-5576978c7c-qvf5n\" (UID: \"b63d0010-bc67-416c-bdbd-cd9dc80b6c9d\") " pod="openstack/dnsmasq-dns-5576978c7c-qvf5n" Nov 22 11:00:50 crc kubenswrapper[4926]: I1122 11:00:50.356933 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/b63d0010-bc67-416c-bdbd-cd9dc80b6c9d-dns-svc\") pod \"dnsmasq-dns-5576978c7c-qvf5n\" (UID: \"b63d0010-bc67-416c-bdbd-cd9dc80b6c9d\") " pod="openstack/dnsmasq-dns-5576978c7c-qvf5n" Nov 22 11:00:50 crc kubenswrapper[4926]: I1122 11:00:50.356994 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fhlpr\" (UniqueName: \"kubernetes.io/projected/b63d0010-bc67-416c-bdbd-cd9dc80b6c9d-kube-api-access-fhlpr\") pod \"dnsmasq-dns-5576978c7c-qvf5n\" (UID: \"b63d0010-bc67-416c-bdbd-cd9dc80b6c9d\") " pod="openstack/dnsmasq-dns-5576978c7c-qvf5n" Nov 22 11:00:50 crc kubenswrapper[4926]: I1122 11:00:50.357020 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" 
(UniqueName: \"kubernetes.io/configmap/b63d0010-bc67-416c-bdbd-cd9dc80b6c9d-config\") pod \"dnsmasq-dns-5576978c7c-qvf5n\" (UID: \"b63d0010-bc67-416c-bdbd-cd9dc80b6c9d\") " pod="openstack/dnsmasq-dns-5576978c7c-qvf5n" Nov 22 11:00:50 crc kubenswrapper[4926]: I1122 11:00:50.357048 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/b63d0010-bc67-416c-bdbd-cd9dc80b6c9d-ovsdbserver-nb\") pod \"dnsmasq-dns-5576978c7c-qvf5n\" (UID: \"b63d0010-bc67-416c-bdbd-cd9dc80b6c9d\") " pod="openstack/dnsmasq-dns-5576978c7c-qvf5n" Nov 22 11:00:50 crc kubenswrapper[4926]: I1122 11:00:50.357081 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/b63d0010-bc67-416c-bdbd-cd9dc80b6c9d-dns-swift-storage-0\") pod \"dnsmasq-dns-5576978c7c-qvf5n\" (UID: \"b63d0010-bc67-416c-bdbd-cd9dc80b6c9d\") " pod="openstack/dnsmasq-dns-5576978c7c-qvf5n" Nov 22 11:00:50 crc kubenswrapper[4926]: I1122 11:00:50.357106 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/b63d0010-bc67-416c-bdbd-cd9dc80b6c9d-openstack-edpm-ipam\") pod \"dnsmasq-dns-5576978c7c-qvf5n\" (UID: \"b63d0010-bc67-416c-bdbd-cd9dc80b6c9d\") " pod="openstack/dnsmasq-dns-5576978c7c-qvf5n" Nov 22 11:00:50 crc kubenswrapper[4926]: I1122 11:00:50.357323 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 22 11:00:50 crc kubenswrapper[4926]: I1122 11:00:50.358047 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/b63d0010-bc67-416c-bdbd-cd9dc80b6c9d-openstack-edpm-ipam\") pod \"dnsmasq-dns-5576978c7c-qvf5n\" (UID: \"b63d0010-bc67-416c-bdbd-cd9dc80b6c9d\") " pod="openstack/dnsmasq-dns-5576978c7c-qvf5n" Nov 22 11:00:50 crc kubenswrapper[4926]: I1122 11:00:50.358622 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/b63d0010-bc67-416c-bdbd-cd9dc80b6c9d-ovsdbserver-sb\") pod \"dnsmasq-dns-5576978c7c-qvf5n\" (UID: \"b63d0010-bc67-416c-bdbd-cd9dc80b6c9d\") " pod="openstack/dnsmasq-dns-5576978c7c-qvf5n" Nov 22 11:00:50 crc kubenswrapper[4926]: I1122 11:00:50.359147 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/b63d0010-bc67-416c-bdbd-cd9dc80b6c9d-dns-svc\") pod \"dnsmasq-dns-5576978c7c-qvf5n\" (UID: \"b63d0010-bc67-416c-bdbd-cd9dc80b6c9d\") " pod="openstack/dnsmasq-dns-5576978c7c-qvf5n" Nov 22 11:00:50 crc kubenswrapper[4926]: I1122 11:00:50.360353 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/b63d0010-bc67-416c-bdbd-cd9dc80b6c9d-ovsdbserver-nb\") pod \"dnsmasq-dns-5576978c7c-qvf5n\" (UID: \"b63d0010-bc67-416c-bdbd-cd9dc80b6c9d\") " pod="openstack/dnsmasq-dns-5576978c7c-qvf5n" Nov 22 11:00:50 crc kubenswrapper[4926]: I1122 11:00:50.360817 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b63d0010-bc67-416c-bdbd-cd9dc80b6c9d-config\") pod \"dnsmasq-dns-5576978c7c-qvf5n\" (UID: \"b63d0010-bc67-416c-bdbd-cd9dc80b6c9d\") " pod="openstack/dnsmasq-dns-5576978c7c-qvf5n" Nov 22 11:00:50 crc kubenswrapper[4926]: I1122 11:00:50.360833 4926 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/b63d0010-bc67-416c-bdbd-cd9dc80b6c9d-dns-swift-storage-0\") pod \"dnsmasq-dns-5576978c7c-qvf5n\" (UID: \"b63d0010-bc67-416c-bdbd-cd9dc80b6c9d\") " pod="openstack/dnsmasq-dns-5576978c7c-qvf5n" Nov 22 11:00:50 crc kubenswrapper[4926]: I1122 11:00:50.383762 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fhlpr\" (UniqueName: \"kubernetes.io/projected/b63d0010-bc67-416c-bdbd-cd9dc80b6c9d-kube-api-access-fhlpr\") pod \"dnsmasq-dns-5576978c7c-qvf5n\" (UID: \"b63d0010-bc67-416c-bdbd-cd9dc80b6c9d\") " pod="openstack/dnsmasq-dns-5576978c7c-qvf5n" Nov 22 11:00:50 crc kubenswrapper[4926]: I1122 11:00:50.458931 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/5bd13931-4b28-4235-a779-aea2a515351e-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"5bd13931-4b28-4235-a779-aea2a515351e\") " pod="openstack/rabbitmq-cell1-server-0" Nov 22 11:00:50 crc kubenswrapper[4926]: I1122 11:00:50.459151 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/5bd13931-4b28-4235-a779-aea2a515351e-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"5bd13931-4b28-4235-a779-aea2a515351e\") " pod="openstack/rabbitmq-cell1-server-0" Nov 22 11:00:50 crc kubenswrapper[4926]: I1122 11:00:50.459223 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/5bd13931-4b28-4235-a779-aea2a515351e-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"5bd13931-4b28-4235-a779-aea2a515351e\") " pod="openstack/rabbitmq-cell1-server-0" Nov 22 11:00:50 crc kubenswrapper[4926]: I1122 11:00:50.459351 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/5bd13931-4b28-4235-a779-aea2a515351e-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"5bd13931-4b28-4235-a779-aea2a515351e\") " pod="openstack/rabbitmq-cell1-server-0" Nov 22 11:00:50 crc kubenswrapper[4926]: I1122 11:00:50.459435 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5s8pv\" (UniqueName: \"kubernetes.io/projected/5bd13931-4b28-4235-a779-aea2a515351e-kube-api-access-5s8pv\") pod \"rabbitmq-cell1-server-0\" (UID: \"5bd13931-4b28-4235-a779-aea2a515351e\") " pod="openstack/rabbitmq-cell1-server-0" Nov 22 11:00:50 crc kubenswrapper[4926]: I1122 11:00:50.459487 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"5bd13931-4b28-4235-a779-aea2a515351e\") " pod="openstack/rabbitmq-cell1-server-0" Nov 22 11:00:50 crc kubenswrapper[4926]: I1122 11:00:50.459508 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/5bd13931-4b28-4235-a779-aea2a515351e-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"5bd13931-4b28-4235-a779-aea2a515351e\") " pod="openstack/rabbitmq-cell1-server-0" Nov 22 11:00:50 crc kubenswrapper[4926]: I1122 11:00:50.459526 4926 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/5bd13931-4b28-4235-a779-aea2a515351e-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"5bd13931-4b28-4235-a779-aea2a515351e\") " pod="openstack/rabbitmq-cell1-server-0" Nov 22 11:00:50 crc kubenswrapper[4926]: I1122 11:00:50.459570 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/5bd13931-4b28-4235-a779-aea2a515351e-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"5bd13931-4b28-4235-a779-aea2a515351e\") " pod="openstack/rabbitmq-cell1-server-0" Nov 22 11:00:50 crc kubenswrapper[4926]: I1122 11:00:50.459590 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/5bd13931-4b28-4235-a779-aea2a515351e-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"5bd13931-4b28-4235-a779-aea2a515351e\") " pod="openstack/rabbitmq-cell1-server-0" Nov 22 11:00:50 crc kubenswrapper[4926]: I1122 11:00:50.459662 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/5bd13931-4b28-4235-a779-aea2a515351e-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"5bd13931-4b28-4235-a779-aea2a515351e\") " pod="openstack/rabbitmq-cell1-server-0" Nov 22 11:00:50 crc kubenswrapper[4926]: I1122 11:00:50.562078 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/5bd13931-4b28-4235-a779-aea2a515351e-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"5bd13931-4b28-4235-a779-aea2a515351e\") " pod="openstack/rabbitmq-cell1-server-0" Nov 22 11:00:50 crc kubenswrapper[4926]: I1122 11:00:50.562172 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/5bd13931-4b28-4235-a779-aea2a515351e-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"5bd13931-4b28-4235-a779-aea2a515351e\") " pod="openstack/rabbitmq-cell1-server-0" Nov 22 11:00:50 crc kubenswrapper[4926]: I1122 11:00:50.562303 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/5bd13931-4b28-4235-a779-aea2a515351e-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"5bd13931-4b28-4235-a779-aea2a515351e\") " pod="openstack/rabbitmq-cell1-server-0" Nov 22 11:00:50 crc kubenswrapper[4926]: I1122 11:00:50.563121 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/5bd13931-4b28-4235-a779-aea2a515351e-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"5bd13931-4b28-4235-a779-aea2a515351e\") " pod="openstack/rabbitmq-cell1-server-0" Nov 22 11:00:50 crc kubenswrapper[4926]: I1122 11:00:50.563188 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/5bd13931-4b28-4235-a779-aea2a515351e-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"5bd13931-4b28-4235-a779-aea2a515351e\") " pod="openstack/rabbitmq-cell1-server-0" Nov 22 11:00:50 crc kubenswrapper[4926]: I1122 11:00:50.563266 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"kube-api-access-5s8pv\" (UniqueName: \"kubernetes.io/projected/5bd13931-4b28-4235-a779-aea2a515351e-kube-api-access-5s8pv\") pod \"rabbitmq-cell1-server-0\" (UID: \"5bd13931-4b28-4235-a779-aea2a515351e\") " pod="openstack/rabbitmq-cell1-server-0" Nov 22 11:00:50 crc kubenswrapper[4926]: I1122 11:00:50.563317 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"5bd13931-4b28-4235-a779-aea2a515351e\") " pod="openstack/rabbitmq-cell1-server-0" Nov 22 11:00:50 crc kubenswrapper[4926]: I1122 11:00:50.563588 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/5bd13931-4b28-4235-a779-aea2a515351e-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"5bd13931-4b28-4235-a779-aea2a515351e\") " pod="openstack/rabbitmq-cell1-server-0" Nov 22 11:00:50 crc kubenswrapper[4926]: I1122 11:00:50.563605 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/5bd13931-4b28-4235-a779-aea2a515351e-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"5bd13931-4b28-4235-a779-aea2a515351e\") " pod="openstack/rabbitmq-cell1-server-0" Nov 22 11:00:50 crc kubenswrapper[4926]: I1122 11:00:50.563614 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/5bd13931-4b28-4235-a779-aea2a515351e-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"5bd13931-4b28-4235-a779-aea2a515351e\") " pod="openstack/rabbitmq-cell1-server-0" Nov 22 11:00:50 crc kubenswrapper[4926]: I1122 11:00:50.563651 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/5bd13931-4b28-4235-a779-aea2a515351e-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"5bd13931-4b28-4235-a779-aea2a515351e\") " pod="openstack/rabbitmq-cell1-server-0" Nov 22 11:00:50 crc kubenswrapper[4926]: I1122 11:00:50.563779 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/5bd13931-4b28-4235-a779-aea2a515351e-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"5bd13931-4b28-4235-a779-aea2a515351e\") " pod="openstack/rabbitmq-cell1-server-0" Nov 22 11:00:50 crc kubenswrapper[4926]: I1122 11:00:50.563809 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/5bd13931-4b28-4235-a779-aea2a515351e-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"5bd13931-4b28-4235-a779-aea2a515351e\") " pod="openstack/rabbitmq-cell1-server-0" Nov 22 11:00:50 crc kubenswrapper[4926]: I1122 11:00:50.563845 4926 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"5bd13931-4b28-4235-a779-aea2a515351e\") device mount path \"/mnt/openstack/pv11\"" pod="openstack/rabbitmq-cell1-server-0" Nov 22 11:00:50 crc kubenswrapper[4926]: I1122 11:00:50.564582 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/5bd13931-4b28-4235-a779-aea2a515351e-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" 
(UID: \"5bd13931-4b28-4235-a779-aea2a515351e\") " pod="openstack/rabbitmq-cell1-server-0" Nov 22 11:00:50 crc kubenswrapper[4926]: I1122 11:00:50.564869 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/5bd13931-4b28-4235-a779-aea2a515351e-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"5bd13931-4b28-4235-a779-aea2a515351e\") " pod="openstack/rabbitmq-cell1-server-0" Nov 22 11:00:50 crc kubenswrapper[4926]: I1122 11:00:50.565048 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/5bd13931-4b28-4235-a779-aea2a515351e-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"5bd13931-4b28-4235-a779-aea2a515351e\") " pod="openstack/rabbitmq-cell1-server-0" Nov 22 11:00:50 crc kubenswrapper[4926]: I1122 11:00:50.566235 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/5bd13931-4b28-4235-a779-aea2a515351e-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"5bd13931-4b28-4235-a779-aea2a515351e\") " pod="openstack/rabbitmq-cell1-server-0" Nov 22 11:00:50 crc kubenswrapper[4926]: I1122 11:00:50.566604 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/5bd13931-4b28-4235-a779-aea2a515351e-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"5bd13931-4b28-4235-a779-aea2a515351e\") " pod="openstack/rabbitmq-cell1-server-0" Nov 22 11:00:50 crc kubenswrapper[4926]: I1122 11:00:50.567748 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/5bd13931-4b28-4235-a779-aea2a515351e-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"5bd13931-4b28-4235-a779-aea2a515351e\") " pod="openstack/rabbitmq-cell1-server-0" Nov 22 11:00:50 crc kubenswrapper[4926]: I1122 11:00:50.569725 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/5bd13931-4b28-4235-a779-aea2a515351e-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"5bd13931-4b28-4235-a779-aea2a515351e\") " pod="openstack/rabbitmq-cell1-server-0" Nov 22 11:00:50 crc kubenswrapper[4926]: I1122 11:00:50.586057 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5s8pv\" (UniqueName: \"kubernetes.io/projected/5bd13931-4b28-4235-a779-aea2a515351e-kube-api-access-5s8pv\") pod \"rabbitmq-cell1-server-0\" (UID: \"5bd13931-4b28-4235-a779-aea2a515351e\") " pod="openstack/rabbitmq-cell1-server-0" Nov 22 11:00:50 crc kubenswrapper[4926]: I1122 11:00:50.602916 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="12c9d69e-19d9-4c70-a080-c3d3fe0c33ce" path="/var/lib/kubelet/pods/12c9d69e-19d9-4c70-a080-c3d3fe0c33ce/volumes" Nov 22 11:00:50 crc kubenswrapper[4926]: I1122 11:00:50.603941 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cb1c4cdf-86c1-4770-b406-87cb1ea92552" path="/var/lib/kubelet/pods/cb1c4cdf-86c1-4770-b406-87cb1ea92552/volumes" Nov 22 11:00:50 crc kubenswrapper[4926]: I1122 11:00:50.607249 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"5bd13931-4b28-4235-a779-aea2a515351e\") " pod="openstack/rabbitmq-cell1-server-0" Nov 22 11:00:50 crc 
Nov 22 11:00:50 crc kubenswrapper[4926]: I1122 11:00:50.707497 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0"
Nov 22 11:00:51 crc kubenswrapper[4926]: I1122 11:00:51.109303 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"9bcfa04c-3c9e-47a5-946e-d7c42d3cefda","Type":"ContainerStarted","Data":"514a5855ee2773dfa6e6ab1bdb29852acfe85eaabbbb0a53280b5eb316369091"}
Nov 22 11:00:51 crc kubenswrapper[4926]: I1122 11:00:51.166820 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5576978c7c-qvf5n"]
Nov 22 11:00:51 crc kubenswrapper[4926]: W1122 11:00:51.170776 4926 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb63d0010_bc67_416c_bdbd_cd9dc80b6c9d.slice/crio-e1d982030cc68c675b2286622db145a4a823646b57ffa325a8c69d616abebd8f WatchSource:0}: Error finding container e1d982030cc68c675b2286622db145a4a823646b57ffa325a8c69d616abebd8f: Status 404 returned error can't find the container with id e1d982030cc68c675b2286622db145a4a823646b57ffa325a8c69d616abebd8f
Nov 22 11:00:51 crc kubenswrapper[4926]: I1122 11:00:51.246639 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"]
Nov 22 11:00:51 crc kubenswrapper[4926]: W1122 11:00:51.248736 4926 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod5bd13931_4b28_4235_a779_aea2a515351e.slice/crio-453a2664e06ebb0f773bf65f0360acf82f7121360007726328de75d728e1f04a WatchSource:0}: Error finding container 453a2664e06ebb0f773bf65f0360acf82f7121360007726328de75d728e1f04a: Status 404 returned error can't find the container with id 453a2664e06ebb0f773bf65f0360acf82f7121360007726328de75d728e1f04a
Nov 22 11:00:52 crc kubenswrapper[4926]: I1122 11:00:52.124997 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"5bd13931-4b28-4235-a779-aea2a515351e","Type":"ContainerStarted","Data":"453a2664e06ebb0f773bf65f0360acf82f7121360007726328de75d728e1f04a"}
Nov 22 11:00:52 crc kubenswrapper[4926]: I1122 11:00:52.128402 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"9bcfa04c-3c9e-47a5-946e-d7c42d3cefda","Type":"ContainerStarted","Data":"23b663ee825578ffe1cf44d00e1122247140b7ddc2c06846baf00bd9bd32ede3"}
Nov 22 11:00:52 crc kubenswrapper[4926]: I1122 11:00:52.130849 4926 generic.go:334] "Generic (PLEG): container finished" podID="b63d0010-bc67-416c-bdbd-cd9dc80b6c9d" containerID="9348d90ad999e7d40eb47b23cc5b987129f69f7aba2aea57c0e13b27d3337e98" exitCode=0
Nov 22 11:00:52 crc kubenswrapper[4926]: I1122 11:00:52.130924 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5576978c7c-qvf5n" event={"ID":"b63d0010-bc67-416c-bdbd-cd9dc80b6c9d","Type":"ContainerDied","Data":"9348d90ad999e7d40eb47b23cc5b987129f69f7aba2aea57c0e13b27d3337e98"}
Nov 22 11:00:52 crc kubenswrapper[4926]: I1122 11:00:52.131034 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5576978c7c-qvf5n" event={"ID":"b63d0010-bc67-416c-bdbd-cd9dc80b6c9d","Type":"ContainerStarted","Data":"e1d982030cc68c675b2286622db145a4a823646b57ffa325a8c69d616abebd8f"}
Nov 22 11:00:53 crc kubenswrapper[4926]: I1122 11:00:53.143443 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"5bd13931-4b28-4235-a779-aea2a515351e","Type":"ContainerStarted","Data":"21eb39d3d942eb0a15832d941ec08099f5211106ad5790d4d1182e36679f7a11"}
11:00:53 crc kubenswrapper[4926]: I1122 11:00:53.143443 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"5bd13931-4b28-4235-a779-aea2a515351e","Type":"ContainerStarted","Data":"21eb39d3d942eb0a15832d941ec08099f5211106ad5790d4d1182e36679f7a11"} Nov 22 11:00:53 crc kubenswrapper[4926]: I1122 11:00:53.146832 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5576978c7c-qvf5n" event={"ID":"b63d0010-bc67-416c-bdbd-cd9dc80b6c9d","Type":"ContainerStarted","Data":"508b9aa1debea837dfad28399a34172c9919298091b4a8fabd714977fa96e322"} Nov 22 11:00:53 crc kubenswrapper[4926]: I1122 11:00:53.213339 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-5576978c7c-qvf5n" podStartSLOduration=3.213321171 podStartE2EDuration="3.213321171s" podCreationTimestamp="2025-11-22 11:00:50 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 11:00:53.203110278 +0000 UTC m=+1273.504715605" watchObservedRunningTime="2025-11-22 11:00:53.213321171 +0000 UTC m=+1273.514926448" Nov 22 11:00:54 crc kubenswrapper[4926]: I1122 11:00:54.155922 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-5576978c7c-qvf5n" Nov 22 11:01:00 crc kubenswrapper[4926]: I1122 11:01:00.151594 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-cron-29396821-jtvch"] Nov 22 11:01:00 crc kubenswrapper[4926]: I1122 11:01:00.153780 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-cron-29396821-jtvch" Nov 22 11:01:00 crc kubenswrapper[4926]: I1122 11:01:00.160175 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-cron-29396821-jtvch"] Nov 22 11:01:00 crc kubenswrapper[4926]: I1122 11:01:00.254902 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a8024291-de1f-49c8-bac5-b4d37978639d-config-data\") pod \"keystone-cron-29396821-jtvch\" (UID: \"a8024291-de1f-49c8-bac5-b4d37978639d\") " pod="openstack/keystone-cron-29396821-jtvch" Nov 22 11:01:00 crc kubenswrapper[4926]: I1122 11:01:00.254956 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/a8024291-de1f-49c8-bac5-b4d37978639d-fernet-keys\") pod \"keystone-cron-29396821-jtvch\" (UID: \"a8024291-de1f-49c8-bac5-b4d37978639d\") " pod="openstack/keystone-cron-29396821-jtvch" Nov 22 11:01:00 crc kubenswrapper[4926]: I1122 11:01:00.255035 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a8024291-de1f-49c8-bac5-b4d37978639d-combined-ca-bundle\") pod \"keystone-cron-29396821-jtvch\" (UID: \"a8024291-de1f-49c8-bac5-b4d37978639d\") " pod="openstack/keystone-cron-29396821-jtvch" Nov 22 11:01:00 crc kubenswrapper[4926]: I1122 11:01:00.255151 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qdf5t\" (UniqueName: \"kubernetes.io/projected/a8024291-de1f-49c8-bac5-b4d37978639d-kube-api-access-qdf5t\") pod \"keystone-cron-29396821-jtvch\" (UID: \"a8024291-de1f-49c8-bac5-b4d37978639d\") " pod="openstack/keystone-cron-29396821-jtvch" Nov 22 11:01:00 crc kubenswrapper[4926]: I1122 
11:01:00.356971 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a8024291-de1f-49c8-bac5-b4d37978639d-combined-ca-bundle\") pod \"keystone-cron-29396821-jtvch\" (UID: \"a8024291-de1f-49c8-bac5-b4d37978639d\") " pod="openstack/keystone-cron-29396821-jtvch" Nov 22 11:01:00 crc kubenswrapper[4926]: I1122 11:01:00.357096 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qdf5t\" (UniqueName: \"kubernetes.io/projected/a8024291-de1f-49c8-bac5-b4d37978639d-kube-api-access-qdf5t\") pod \"keystone-cron-29396821-jtvch\" (UID: \"a8024291-de1f-49c8-bac5-b4d37978639d\") " pod="openstack/keystone-cron-29396821-jtvch" Nov 22 11:01:00 crc kubenswrapper[4926]: I1122 11:01:00.357169 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a8024291-de1f-49c8-bac5-b4d37978639d-config-data\") pod \"keystone-cron-29396821-jtvch\" (UID: \"a8024291-de1f-49c8-bac5-b4d37978639d\") " pod="openstack/keystone-cron-29396821-jtvch" Nov 22 11:01:00 crc kubenswrapper[4926]: I1122 11:01:00.357204 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/a8024291-de1f-49c8-bac5-b4d37978639d-fernet-keys\") pod \"keystone-cron-29396821-jtvch\" (UID: \"a8024291-de1f-49c8-bac5-b4d37978639d\") " pod="openstack/keystone-cron-29396821-jtvch" Nov 22 11:01:00 crc kubenswrapper[4926]: I1122 11:01:00.363338 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a8024291-de1f-49c8-bac5-b4d37978639d-combined-ca-bundle\") pod \"keystone-cron-29396821-jtvch\" (UID: \"a8024291-de1f-49c8-bac5-b4d37978639d\") " pod="openstack/keystone-cron-29396821-jtvch" Nov 22 11:01:00 crc kubenswrapper[4926]: I1122 11:01:00.373153 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/a8024291-de1f-49c8-bac5-b4d37978639d-fernet-keys\") pod \"keystone-cron-29396821-jtvch\" (UID: \"a8024291-de1f-49c8-bac5-b4d37978639d\") " pod="openstack/keystone-cron-29396821-jtvch" Nov 22 11:01:00 crc kubenswrapper[4926]: I1122 11:01:00.374533 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a8024291-de1f-49c8-bac5-b4d37978639d-config-data\") pod \"keystone-cron-29396821-jtvch\" (UID: \"a8024291-de1f-49c8-bac5-b4d37978639d\") " pod="openstack/keystone-cron-29396821-jtvch" Nov 22 11:01:00 crc kubenswrapper[4926]: I1122 11:01:00.376768 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qdf5t\" (UniqueName: \"kubernetes.io/projected/a8024291-de1f-49c8-bac5-b4d37978639d-kube-api-access-qdf5t\") pod \"keystone-cron-29396821-jtvch\" (UID: \"a8024291-de1f-49c8-bac5-b4d37978639d\") " pod="openstack/keystone-cron-29396821-jtvch" Nov 22 11:01:00 crc kubenswrapper[4926]: I1122 11:01:00.484146 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-cron-29396821-jtvch" Nov 22 11:01:00 crc kubenswrapper[4926]: I1122 11:01:00.684069 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-5576978c7c-qvf5n" Nov 22 11:01:00 crc kubenswrapper[4926]: I1122 11:01:00.756801 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5c7b6c5df9-ptcmn"] Nov 22 11:01:00 crc kubenswrapper[4926]: I1122 11:01:00.757429 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-5c7b6c5df9-ptcmn" podUID="13ac548d-31ed-4cec-b356-ce3ae008af91" containerName="dnsmasq-dns" containerID="cri-o://3253ee51a2d93777d0d723a625a9c2475f68d2bffff1f5d64381f55454a78915" gracePeriod=10 Nov 22 11:01:00 crc kubenswrapper[4926]: I1122 11:01:00.882918 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-8c6f6df99-m2jnh"] Nov 22 11:01:00 crc kubenswrapper[4926]: I1122 11:01:00.885239 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-8c6f6df99-m2jnh" Nov 22 11:01:00 crc kubenswrapper[4926]: I1122 11:01:00.908431 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-8c6f6df99-m2jnh"] Nov 22 11:01:00 crc kubenswrapper[4926]: I1122 11:01:00.957700 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-cron-29396821-jtvch"] Nov 22 11:01:00 crc kubenswrapper[4926]: I1122 11:01:00.969395 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/17f914e9-40ef-4428-817c-6f72279f844f-ovsdbserver-nb\") pod \"dnsmasq-dns-8c6f6df99-m2jnh\" (UID: \"17f914e9-40ef-4428-817c-6f72279f844f\") " pod="openstack/dnsmasq-dns-8c6f6df99-m2jnh" Nov 22 11:01:00 crc kubenswrapper[4926]: I1122 11:01:00.969466 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/17f914e9-40ef-4428-817c-6f72279f844f-openstack-edpm-ipam\") pod \"dnsmasq-dns-8c6f6df99-m2jnh\" (UID: \"17f914e9-40ef-4428-817c-6f72279f844f\") " pod="openstack/dnsmasq-dns-8c6f6df99-m2jnh" Nov 22 11:01:00 crc kubenswrapper[4926]: I1122 11:01:00.969561 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/17f914e9-40ef-4428-817c-6f72279f844f-ovsdbserver-sb\") pod \"dnsmasq-dns-8c6f6df99-m2jnh\" (UID: \"17f914e9-40ef-4428-817c-6f72279f844f\") " pod="openstack/dnsmasq-dns-8c6f6df99-m2jnh" Nov 22 11:01:00 crc kubenswrapper[4926]: I1122 11:01:00.969594 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/17f914e9-40ef-4428-817c-6f72279f844f-dns-swift-storage-0\") pod \"dnsmasq-dns-8c6f6df99-m2jnh\" (UID: \"17f914e9-40ef-4428-817c-6f72279f844f\") " pod="openstack/dnsmasq-dns-8c6f6df99-m2jnh" Nov 22 11:01:00 crc kubenswrapper[4926]: I1122 11:01:00.969655 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-42z8m\" (UniqueName: \"kubernetes.io/projected/17f914e9-40ef-4428-817c-6f72279f844f-kube-api-access-42z8m\") pod \"dnsmasq-dns-8c6f6df99-m2jnh\" (UID: \"17f914e9-40ef-4428-817c-6f72279f844f\") " pod="openstack/dnsmasq-dns-8c6f6df99-m2jnh" Nov 22 11:01:00 crc kubenswrapper[4926]: 
I1122 11:01:00.969764 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/17f914e9-40ef-4428-817c-6f72279f844f-dns-svc\") pod \"dnsmasq-dns-8c6f6df99-m2jnh\" (UID: \"17f914e9-40ef-4428-817c-6f72279f844f\") " pod="openstack/dnsmasq-dns-8c6f6df99-m2jnh" Nov 22 11:01:00 crc kubenswrapper[4926]: I1122 11:01:00.969802 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/17f914e9-40ef-4428-817c-6f72279f844f-config\") pod \"dnsmasq-dns-8c6f6df99-m2jnh\" (UID: \"17f914e9-40ef-4428-817c-6f72279f844f\") " pod="openstack/dnsmasq-dns-8c6f6df99-m2jnh" Nov 22 11:01:01 crc kubenswrapper[4926]: I1122 11:01:01.071379 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/17f914e9-40ef-4428-817c-6f72279f844f-ovsdbserver-nb\") pod \"dnsmasq-dns-8c6f6df99-m2jnh\" (UID: \"17f914e9-40ef-4428-817c-6f72279f844f\") " pod="openstack/dnsmasq-dns-8c6f6df99-m2jnh" Nov 22 11:01:01 crc kubenswrapper[4926]: I1122 11:01:01.071725 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/17f914e9-40ef-4428-817c-6f72279f844f-openstack-edpm-ipam\") pod \"dnsmasq-dns-8c6f6df99-m2jnh\" (UID: \"17f914e9-40ef-4428-817c-6f72279f844f\") " pod="openstack/dnsmasq-dns-8c6f6df99-m2jnh" Nov 22 11:01:01 crc kubenswrapper[4926]: I1122 11:01:01.071783 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/17f914e9-40ef-4428-817c-6f72279f844f-ovsdbserver-sb\") pod \"dnsmasq-dns-8c6f6df99-m2jnh\" (UID: \"17f914e9-40ef-4428-817c-6f72279f844f\") " pod="openstack/dnsmasq-dns-8c6f6df99-m2jnh" Nov 22 11:01:01 crc kubenswrapper[4926]: I1122 11:01:01.071815 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/17f914e9-40ef-4428-817c-6f72279f844f-dns-swift-storage-0\") pod \"dnsmasq-dns-8c6f6df99-m2jnh\" (UID: \"17f914e9-40ef-4428-817c-6f72279f844f\") " pod="openstack/dnsmasq-dns-8c6f6df99-m2jnh" Nov 22 11:01:01 crc kubenswrapper[4926]: I1122 11:01:01.071866 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-42z8m\" (UniqueName: \"kubernetes.io/projected/17f914e9-40ef-4428-817c-6f72279f844f-kube-api-access-42z8m\") pod \"dnsmasq-dns-8c6f6df99-m2jnh\" (UID: \"17f914e9-40ef-4428-817c-6f72279f844f\") " pod="openstack/dnsmasq-dns-8c6f6df99-m2jnh" Nov 22 11:01:01 crc kubenswrapper[4926]: I1122 11:01:01.072034 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/17f914e9-40ef-4428-817c-6f72279f844f-dns-svc\") pod \"dnsmasq-dns-8c6f6df99-m2jnh\" (UID: \"17f914e9-40ef-4428-817c-6f72279f844f\") " pod="openstack/dnsmasq-dns-8c6f6df99-m2jnh" Nov 22 11:01:01 crc kubenswrapper[4926]: I1122 11:01:01.072076 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/17f914e9-40ef-4428-817c-6f72279f844f-config\") pod \"dnsmasq-dns-8c6f6df99-m2jnh\" (UID: \"17f914e9-40ef-4428-817c-6f72279f844f\") " pod="openstack/dnsmasq-dns-8c6f6df99-m2jnh" Nov 22 11:01:01 crc kubenswrapper[4926]: I1122 11:01:01.072452 4926 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/17f914e9-40ef-4428-817c-6f72279f844f-openstack-edpm-ipam\") pod \"dnsmasq-dns-8c6f6df99-m2jnh\" (UID: \"17f914e9-40ef-4428-817c-6f72279f844f\") " pod="openstack/dnsmasq-dns-8c6f6df99-m2jnh" Nov 22 11:01:01 crc kubenswrapper[4926]: I1122 11:01:01.072452 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/17f914e9-40ef-4428-817c-6f72279f844f-ovsdbserver-nb\") pod \"dnsmasq-dns-8c6f6df99-m2jnh\" (UID: \"17f914e9-40ef-4428-817c-6f72279f844f\") " pod="openstack/dnsmasq-dns-8c6f6df99-m2jnh" Nov 22 11:01:01 crc kubenswrapper[4926]: I1122 11:01:01.073177 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/17f914e9-40ef-4428-817c-6f72279f844f-dns-swift-storage-0\") pod \"dnsmasq-dns-8c6f6df99-m2jnh\" (UID: \"17f914e9-40ef-4428-817c-6f72279f844f\") " pod="openstack/dnsmasq-dns-8c6f6df99-m2jnh" Nov 22 11:01:01 crc kubenswrapper[4926]: I1122 11:01:01.073380 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/17f914e9-40ef-4428-817c-6f72279f844f-ovsdbserver-sb\") pod \"dnsmasq-dns-8c6f6df99-m2jnh\" (UID: \"17f914e9-40ef-4428-817c-6f72279f844f\") " pod="openstack/dnsmasq-dns-8c6f6df99-m2jnh" Nov 22 11:01:01 crc kubenswrapper[4926]: I1122 11:01:01.073420 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/17f914e9-40ef-4428-817c-6f72279f844f-config\") pod \"dnsmasq-dns-8c6f6df99-m2jnh\" (UID: \"17f914e9-40ef-4428-817c-6f72279f844f\") " pod="openstack/dnsmasq-dns-8c6f6df99-m2jnh" Nov 22 11:01:01 crc kubenswrapper[4926]: I1122 11:01:01.073911 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/17f914e9-40ef-4428-817c-6f72279f844f-dns-svc\") pod \"dnsmasq-dns-8c6f6df99-m2jnh\" (UID: \"17f914e9-40ef-4428-817c-6f72279f844f\") " pod="openstack/dnsmasq-dns-8c6f6df99-m2jnh" Nov 22 11:01:01 crc kubenswrapper[4926]: I1122 11:01:01.093453 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-42z8m\" (UniqueName: \"kubernetes.io/projected/17f914e9-40ef-4428-817c-6f72279f844f-kube-api-access-42z8m\") pod \"dnsmasq-dns-8c6f6df99-m2jnh\" (UID: \"17f914e9-40ef-4428-817c-6f72279f844f\") " pod="openstack/dnsmasq-dns-8c6f6df99-m2jnh" Nov 22 11:01:01 crc kubenswrapper[4926]: I1122 11:01:01.158279 4926 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5c7b6c5df9-ptcmn" Nov 22 11:01:01 crc kubenswrapper[4926]: I1122 11:01:01.225132 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29396821-jtvch" event={"ID":"a8024291-de1f-49c8-bac5-b4d37978639d","Type":"ContainerStarted","Data":"1a7c3b931bb862d96f6f7c1664f2eebc5510efeabd452bf0f5c4e16d93bfee22"} Nov 22 11:01:01 crc kubenswrapper[4926]: I1122 11:01:01.225179 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29396821-jtvch" event={"ID":"a8024291-de1f-49c8-bac5-b4d37978639d","Type":"ContainerStarted","Data":"26225de751f489c46a8b5edd432cdd5aadb61fab01f31d1f96ce2be877aade80"} Nov 22 11:01:01 crc kubenswrapper[4926]: I1122 11:01:01.228680 4926 generic.go:334] "Generic (PLEG): container finished" podID="13ac548d-31ed-4cec-b356-ce3ae008af91" containerID="3253ee51a2d93777d0d723a625a9c2475f68d2bffff1f5d64381f55454a78915" exitCode=0 Nov 22 11:01:01 crc kubenswrapper[4926]: I1122 11:01:01.228720 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5c7b6c5df9-ptcmn" event={"ID":"13ac548d-31ed-4cec-b356-ce3ae008af91","Type":"ContainerDied","Data":"3253ee51a2d93777d0d723a625a9c2475f68d2bffff1f5d64381f55454a78915"} Nov 22 11:01:01 crc kubenswrapper[4926]: I1122 11:01:01.228735 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5c7b6c5df9-ptcmn" Nov 22 11:01:01 crc kubenswrapper[4926]: I1122 11:01:01.228748 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5c7b6c5df9-ptcmn" event={"ID":"13ac548d-31ed-4cec-b356-ce3ae008af91","Type":"ContainerDied","Data":"d39b1a50901da18eee1d77d29ffde4cb3461badebfb56cec4b6cbcd1530efff9"} Nov 22 11:01:01 crc kubenswrapper[4926]: I1122 11:01:01.228832 4926 scope.go:117] "RemoveContainer" containerID="3253ee51a2d93777d0d723a625a9c2475f68d2bffff1f5d64381f55454a78915" Nov 22 11:01:01 crc kubenswrapper[4926]: I1122 11:01:01.238170 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-8c6f6df99-m2jnh" Nov 22 11:01:01 crc kubenswrapper[4926]: I1122 11:01:01.260497 4926 scope.go:117] "RemoveContainer" containerID="be3e1bca5376041bea116cbf58fce7031039203940cbd4075d97ea38c45c9f78" Nov 22 11:01:01 crc kubenswrapper[4926]: I1122 11:01:01.276615 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/13ac548d-31ed-4cec-b356-ce3ae008af91-ovsdbserver-nb\") pod \"13ac548d-31ed-4cec-b356-ce3ae008af91\" (UID: \"13ac548d-31ed-4cec-b356-ce3ae008af91\") " Nov 22 11:01:01 crc kubenswrapper[4926]: I1122 11:01:01.276664 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/13ac548d-31ed-4cec-b356-ce3ae008af91-dns-swift-storage-0\") pod \"13ac548d-31ed-4cec-b356-ce3ae008af91\" (UID: \"13ac548d-31ed-4cec-b356-ce3ae008af91\") " Nov 22 11:01:01 crc kubenswrapper[4926]: I1122 11:01:01.276815 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/13ac548d-31ed-4cec-b356-ce3ae008af91-dns-svc\") pod \"13ac548d-31ed-4cec-b356-ce3ae008af91\" (UID: \"13ac548d-31ed-4cec-b356-ce3ae008af91\") " Nov 22 11:01:01 crc kubenswrapper[4926]: I1122 11:01:01.277628 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/13ac548d-31ed-4cec-b356-ce3ae008af91-config\") pod \"13ac548d-31ed-4cec-b356-ce3ae008af91\" (UID: \"13ac548d-31ed-4cec-b356-ce3ae008af91\") " Nov 22 11:01:01 crc kubenswrapper[4926]: I1122 11:01:01.277670 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/13ac548d-31ed-4cec-b356-ce3ae008af91-ovsdbserver-sb\") pod \"13ac548d-31ed-4cec-b356-ce3ae008af91\" (UID: \"13ac548d-31ed-4cec-b356-ce3ae008af91\") " Nov 22 11:01:01 crc kubenswrapper[4926]: I1122 11:01:01.277753 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zr59j\" (UniqueName: \"kubernetes.io/projected/13ac548d-31ed-4cec-b356-ce3ae008af91-kube-api-access-zr59j\") pod \"13ac548d-31ed-4cec-b356-ce3ae008af91\" (UID: \"13ac548d-31ed-4cec-b356-ce3ae008af91\") " Nov 22 11:01:01 crc kubenswrapper[4926]: I1122 11:01:01.283703 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/13ac548d-31ed-4cec-b356-ce3ae008af91-kube-api-access-zr59j" (OuterVolumeSpecName: "kube-api-access-zr59j") pod "13ac548d-31ed-4cec-b356-ce3ae008af91" (UID: "13ac548d-31ed-4cec-b356-ce3ae008af91"). InnerVolumeSpecName "kube-api-access-zr59j". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 11:01:01 crc kubenswrapper[4926]: I1122 11:01:01.291490 4926 scope.go:117] "RemoveContainer" containerID="3253ee51a2d93777d0d723a625a9c2475f68d2bffff1f5d64381f55454a78915" Nov 22 11:01:01 crc kubenswrapper[4926]: E1122 11:01:01.295402 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3253ee51a2d93777d0d723a625a9c2475f68d2bffff1f5d64381f55454a78915\": container with ID starting with 3253ee51a2d93777d0d723a625a9c2475f68d2bffff1f5d64381f55454a78915 not found: ID does not exist" containerID="3253ee51a2d93777d0d723a625a9c2475f68d2bffff1f5d64381f55454a78915" Nov 22 11:01:01 crc kubenswrapper[4926]: I1122 11:01:01.295439 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3253ee51a2d93777d0d723a625a9c2475f68d2bffff1f5d64381f55454a78915"} err="failed to get container status \"3253ee51a2d93777d0d723a625a9c2475f68d2bffff1f5d64381f55454a78915\": rpc error: code = NotFound desc = could not find container \"3253ee51a2d93777d0d723a625a9c2475f68d2bffff1f5d64381f55454a78915\": container with ID starting with 3253ee51a2d93777d0d723a625a9c2475f68d2bffff1f5d64381f55454a78915 not found: ID does not exist" Nov 22 11:01:01 crc kubenswrapper[4926]: I1122 11:01:01.295469 4926 scope.go:117] "RemoveContainer" containerID="be3e1bca5376041bea116cbf58fce7031039203940cbd4075d97ea38c45c9f78" Nov 22 11:01:01 crc kubenswrapper[4926]: E1122 11:01:01.295838 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"be3e1bca5376041bea116cbf58fce7031039203940cbd4075d97ea38c45c9f78\": container with ID starting with be3e1bca5376041bea116cbf58fce7031039203940cbd4075d97ea38c45c9f78 not found: ID does not exist" containerID="be3e1bca5376041bea116cbf58fce7031039203940cbd4075d97ea38c45c9f78" Nov 22 11:01:01 crc kubenswrapper[4926]: I1122 11:01:01.295861 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"be3e1bca5376041bea116cbf58fce7031039203940cbd4075d97ea38c45c9f78"} err="failed to get container status \"be3e1bca5376041bea116cbf58fce7031039203940cbd4075d97ea38c45c9f78\": rpc error: code = NotFound desc = could not find container \"be3e1bca5376041bea116cbf58fce7031039203940cbd4075d97ea38c45c9f78\": container with ID starting with be3e1bca5376041bea116cbf58fce7031039203940cbd4075d97ea38c45c9f78 not found: ID does not exist" Nov 22 11:01:01 crc kubenswrapper[4926]: I1122 11:01:01.322713 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/13ac548d-31ed-4cec-b356-ce3ae008af91-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "13ac548d-31ed-4cec-b356-ce3ae008af91" (UID: "13ac548d-31ed-4cec-b356-ce3ae008af91"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 11:01:01 crc kubenswrapper[4926]: I1122 11:01:01.325771 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/13ac548d-31ed-4cec-b356-ce3ae008af91-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "13ac548d-31ed-4cec-b356-ce3ae008af91" (UID: "13ac548d-31ed-4cec-b356-ce3ae008af91"). InnerVolumeSpecName "ovsdbserver-nb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 11:01:01 crc kubenswrapper[4926]: I1122 11:01:01.327651 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/13ac548d-31ed-4cec-b356-ce3ae008af91-config" (OuterVolumeSpecName: "config") pod "13ac548d-31ed-4cec-b356-ce3ae008af91" (UID: "13ac548d-31ed-4cec-b356-ce3ae008af91"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 11:01:01 crc kubenswrapper[4926]: E1122 11:01:01.335325 4926 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/13ac548d-31ed-4cec-b356-ce3ae008af91-dns-svc podName:13ac548d-31ed-4cec-b356-ce3ae008af91 nodeName:}" failed. No retries permitted until 2025-11-22 11:01:01.835296299 +0000 UTC m=+1282.136901596 (durationBeforeRetry 500ms). Error: error cleaning subPath mounts for volume "dns-svc" (UniqueName: "kubernetes.io/configmap/13ac548d-31ed-4cec-b356-ce3ae008af91-dns-svc") pod "13ac548d-31ed-4cec-b356-ce3ae008af91" (UID: "13ac548d-31ed-4cec-b356-ce3ae008af91") : error deleting /var/lib/kubelet/pods/13ac548d-31ed-4cec-b356-ce3ae008af91/volume-subpaths: remove /var/lib/kubelet/pods/13ac548d-31ed-4cec-b356-ce3ae008af91/volume-subpaths: no such file or directory Nov 22 11:01:01 crc kubenswrapper[4926]: I1122 11:01:01.335591 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/13ac548d-31ed-4cec-b356-ce3ae008af91-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "13ac548d-31ed-4cec-b356-ce3ae008af91" (UID: "13ac548d-31ed-4cec-b356-ce3ae008af91"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 11:01:01 crc kubenswrapper[4926]: I1122 11:01:01.380711 4926 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/13ac548d-31ed-4cec-b356-ce3ae008af91-config\") on node \"crc\" DevicePath \"\"" Nov 22 11:01:01 crc kubenswrapper[4926]: I1122 11:01:01.380738 4926 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/13ac548d-31ed-4cec-b356-ce3ae008af91-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 22 11:01:01 crc kubenswrapper[4926]: I1122 11:01:01.380747 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zr59j\" (UniqueName: \"kubernetes.io/projected/13ac548d-31ed-4cec-b356-ce3ae008af91-kube-api-access-zr59j\") on node \"crc\" DevicePath \"\"" Nov 22 11:01:01 crc kubenswrapper[4926]: I1122 11:01:01.380756 4926 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/13ac548d-31ed-4cec-b356-ce3ae008af91-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 22 11:01:01 crc kubenswrapper[4926]: I1122 11:01:01.380763 4926 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/13ac548d-31ed-4cec-b356-ce3ae008af91-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Nov 22 11:01:01 crc kubenswrapper[4926]: I1122 11:01:01.773502 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-8c6f6df99-m2jnh"] Nov 22 11:01:01 crc kubenswrapper[4926]: I1122 11:01:01.891183 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/13ac548d-31ed-4cec-b356-ce3ae008af91-dns-svc\") pod \"13ac548d-31ed-4cec-b356-ce3ae008af91\" (UID: 
\"13ac548d-31ed-4cec-b356-ce3ae008af91\") " Nov 22 11:01:01 crc kubenswrapper[4926]: I1122 11:01:01.891560 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/13ac548d-31ed-4cec-b356-ce3ae008af91-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "13ac548d-31ed-4cec-b356-ce3ae008af91" (UID: "13ac548d-31ed-4cec-b356-ce3ae008af91"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 11:01:01 crc kubenswrapper[4926]: I1122 11:01:01.892386 4926 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/13ac548d-31ed-4cec-b356-ce3ae008af91-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 22 11:01:02 crc kubenswrapper[4926]: I1122 11:01:02.168520 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5c7b6c5df9-ptcmn"] Nov 22 11:01:02 crc kubenswrapper[4926]: I1122 11:01:02.180393 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-5c7b6c5df9-ptcmn"] Nov 22 11:01:02 crc kubenswrapper[4926]: I1122 11:01:02.239920 4926 generic.go:334] "Generic (PLEG): container finished" podID="17f914e9-40ef-4428-817c-6f72279f844f" containerID="a7340e72b72f247152c8c5b16f8453ac211c519b77488579b8bb4fa8ae18caa9" exitCode=0 Nov 22 11:01:02 crc kubenswrapper[4926]: I1122 11:01:02.240243 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-8c6f6df99-m2jnh" event={"ID":"17f914e9-40ef-4428-817c-6f72279f844f","Type":"ContainerDied","Data":"a7340e72b72f247152c8c5b16f8453ac211c519b77488579b8bb4fa8ae18caa9"} Nov 22 11:01:02 crc kubenswrapper[4926]: I1122 11:01:02.240326 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-8c6f6df99-m2jnh" event={"ID":"17f914e9-40ef-4428-817c-6f72279f844f","Type":"ContainerStarted","Data":"070c3ff82b98e6997e8399f4e22579925011855f9def055a9151a3e0f8b626c6"} Nov 22 11:01:02 crc kubenswrapper[4926]: I1122 11:01:02.281194 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-cron-29396821-jtvch" podStartSLOduration=2.281174226 podStartE2EDuration="2.281174226s" podCreationTimestamp="2025-11-22 11:01:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 11:01:02.280643031 +0000 UTC m=+1282.582248318" watchObservedRunningTime="2025-11-22 11:01:02.281174226 +0000 UTC m=+1282.582779513" Nov 22 11:01:02 crc kubenswrapper[4926]: I1122 11:01:02.591865 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="13ac548d-31ed-4cec-b356-ce3ae008af91" path="/var/lib/kubelet/pods/13ac548d-31ed-4cec-b356-ce3ae008af91/volumes" Nov 22 11:01:03 crc kubenswrapper[4926]: I1122 11:01:03.251320 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-8c6f6df99-m2jnh" event={"ID":"17f914e9-40ef-4428-817c-6f72279f844f","Type":"ContainerStarted","Data":"9521ecdcddebf3a609904cdb3d5d61cf689419be647d21c1ebcaf04c506fa1f1"} Nov 22 11:01:03 crc kubenswrapper[4926]: I1122 11:01:03.251612 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-8c6f6df99-m2jnh" Nov 22 11:01:03 crc kubenswrapper[4926]: I1122 11:01:03.252816 4926 generic.go:334] "Generic (PLEG): container finished" podID="a8024291-de1f-49c8-bac5-b4d37978639d" containerID="1a7c3b931bb862d96f6f7c1664f2eebc5510efeabd452bf0f5c4e16d93bfee22" exitCode=0 Nov 22 11:01:03 crc kubenswrapper[4926]: I1122 
11:01:03.252900 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29396821-jtvch" event={"ID":"a8024291-de1f-49c8-bac5-b4d37978639d","Type":"ContainerDied","Data":"1a7c3b931bb862d96f6f7c1664f2eebc5510efeabd452bf0f5c4e16d93bfee22"} Nov 22 11:01:03 crc kubenswrapper[4926]: I1122 11:01:03.272159 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-8c6f6df99-m2jnh" podStartSLOduration=3.272142124 podStartE2EDuration="3.272142124s" podCreationTimestamp="2025-11-22 11:01:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 11:01:03.269797137 +0000 UTC m=+1283.571402434" watchObservedRunningTime="2025-11-22 11:01:03.272142124 +0000 UTC m=+1283.573747431" Nov 22 11:01:04 crc kubenswrapper[4926]: I1122 11:01:04.591515 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-cron-29396821-jtvch" Nov 22 11:01:04 crc kubenswrapper[4926]: I1122 11:01:04.646338 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a8024291-de1f-49c8-bac5-b4d37978639d-combined-ca-bundle\") pod \"a8024291-de1f-49c8-bac5-b4d37978639d\" (UID: \"a8024291-de1f-49c8-bac5-b4d37978639d\") " Nov 22 11:01:04 crc kubenswrapper[4926]: I1122 11:01:04.646680 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a8024291-de1f-49c8-bac5-b4d37978639d-config-data\") pod \"a8024291-de1f-49c8-bac5-b4d37978639d\" (UID: \"a8024291-de1f-49c8-bac5-b4d37978639d\") " Nov 22 11:01:04 crc kubenswrapper[4926]: I1122 11:01:04.646973 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qdf5t\" (UniqueName: \"kubernetes.io/projected/a8024291-de1f-49c8-bac5-b4d37978639d-kube-api-access-qdf5t\") pod \"a8024291-de1f-49c8-bac5-b4d37978639d\" (UID: \"a8024291-de1f-49c8-bac5-b4d37978639d\") " Nov 22 11:01:04 crc kubenswrapper[4926]: I1122 11:01:04.647581 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/a8024291-de1f-49c8-bac5-b4d37978639d-fernet-keys\") pod \"a8024291-de1f-49c8-bac5-b4d37978639d\" (UID: \"a8024291-de1f-49c8-bac5-b4d37978639d\") " Nov 22 11:01:04 crc kubenswrapper[4926]: I1122 11:01:04.652145 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a8024291-de1f-49c8-bac5-b4d37978639d-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "a8024291-de1f-49c8-bac5-b4d37978639d" (UID: "a8024291-de1f-49c8-bac5-b4d37978639d"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 11:01:04 crc kubenswrapper[4926]: I1122 11:01:04.652613 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a8024291-de1f-49c8-bac5-b4d37978639d-kube-api-access-qdf5t" (OuterVolumeSpecName: "kube-api-access-qdf5t") pod "a8024291-de1f-49c8-bac5-b4d37978639d" (UID: "a8024291-de1f-49c8-bac5-b4d37978639d"). InnerVolumeSpecName "kube-api-access-qdf5t". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 11:01:04 crc kubenswrapper[4926]: I1122 11:01:04.676362 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a8024291-de1f-49c8-bac5-b4d37978639d-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "a8024291-de1f-49c8-bac5-b4d37978639d" (UID: "a8024291-de1f-49c8-bac5-b4d37978639d"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 11:01:04 crc kubenswrapper[4926]: I1122 11:01:04.698714 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a8024291-de1f-49c8-bac5-b4d37978639d-config-data" (OuterVolumeSpecName: "config-data") pod "a8024291-de1f-49c8-bac5-b4d37978639d" (UID: "a8024291-de1f-49c8-bac5-b4d37978639d"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 11:01:04 crc kubenswrapper[4926]: I1122 11:01:04.750427 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qdf5t\" (UniqueName: \"kubernetes.io/projected/a8024291-de1f-49c8-bac5-b4d37978639d-kube-api-access-qdf5t\") on node \"crc\" DevicePath \"\"" Nov 22 11:01:04 crc kubenswrapper[4926]: I1122 11:01:04.750466 4926 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/a8024291-de1f-49c8-bac5-b4d37978639d-fernet-keys\") on node \"crc\" DevicePath \"\"" Nov 22 11:01:04 crc kubenswrapper[4926]: I1122 11:01:04.750479 4926 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a8024291-de1f-49c8-bac5-b4d37978639d-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 22 11:01:04 crc kubenswrapper[4926]: I1122 11:01:04.750488 4926 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a8024291-de1f-49c8-bac5-b4d37978639d-config-data\") on node \"crc\" DevicePath \"\"" Nov 22 11:01:05 crc kubenswrapper[4926]: I1122 11:01:05.272993 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29396821-jtvch" event={"ID":"a8024291-de1f-49c8-bac5-b4d37978639d","Type":"ContainerDied","Data":"26225de751f489c46a8b5edd432cdd5aadb61fab01f31d1f96ce2be877aade80"} Nov 22 11:01:05 crc kubenswrapper[4926]: I1122 11:01:05.273033 4926 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="26225de751f489c46a8b5edd432cdd5aadb61fab01f31d1f96ce2be877aade80" Nov 22 11:01:05 crc kubenswrapper[4926]: I1122 11:01:05.273409 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-cron-29396821-jtvch" Nov 22 11:01:11 crc kubenswrapper[4926]: I1122 11:01:11.241142 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-8c6f6df99-m2jnh" Nov 22 11:01:11 crc kubenswrapper[4926]: I1122 11:01:11.297203 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5576978c7c-qvf5n"] Nov 22 11:01:11 crc kubenswrapper[4926]: I1122 11:01:11.297508 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-5576978c7c-qvf5n" podUID="b63d0010-bc67-416c-bdbd-cd9dc80b6c9d" containerName="dnsmasq-dns" containerID="cri-o://508b9aa1debea837dfad28399a34172c9919298091b4a8fabd714977fa96e322" gracePeriod=10 Nov 22 11:01:11 crc kubenswrapper[4926]: I1122 11:01:11.774777 4926 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5576978c7c-qvf5n" Nov 22 11:01:11 crc kubenswrapper[4926]: I1122 11:01:11.884192 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/b63d0010-bc67-416c-bdbd-cd9dc80b6c9d-dns-svc\") pod \"b63d0010-bc67-416c-bdbd-cd9dc80b6c9d\" (UID: \"b63d0010-bc67-416c-bdbd-cd9dc80b6c9d\") " Nov 22 11:01:11 crc kubenswrapper[4926]: I1122 11:01:11.884276 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/b63d0010-bc67-416c-bdbd-cd9dc80b6c9d-ovsdbserver-sb\") pod \"b63d0010-bc67-416c-bdbd-cd9dc80b6c9d\" (UID: \"b63d0010-bc67-416c-bdbd-cd9dc80b6c9d\") " Nov 22 11:01:11 crc kubenswrapper[4926]: I1122 11:01:11.884302 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b63d0010-bc67-416c-bdbd-cd9dc80b6c9d-config\") pod \"b63d0010-bc67-416c-bdbd-cd9dc80b6c9d\" (UID: \"b63d0010-bc67-416c-bdbd-cd9dc80b6c9d\") " Nov 22 11:01:11 crc kubenswrapper[4926]: I1122 11:01:11.884318 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/b63d0010-bc67-416c-bdbd-cd9dc80b6c9d-ovsdbserver-nb\") pod \"b63d0010-bc67-416c-bdbd-cd9dc80b6c9d\" (UID: \"b63d0010-bc67-416c-bdbd-cd9dc80b6c9d\") " Nov 22 11:01:11 crc kubenswrapper[4926]: I1122 11:01:11.884351 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fhlpr\" (UniqueName: \"kubernetes.io/projected/b63d0010-bc67-416c-bdbd-cd9dc80b6c9d-kube-api-access-fhlpr\") pod \"b63d0010-bc67-416c-bdbd-cd9dc80b6c9d\" (UID: \"b63d0010-bc67-416c-bdbd-cd9dc80b6c9d\") " Nov 22 11:01:11 crc kubenswrapper[4926]: I1122 11:01:11.884380 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/b63d0010-bc67-416c-bdbd-cd9dc80b6c9d-openstack-edpm-ipam\") pod \"b63d0010-bc67-416c-bdbd-cd9dc80b6c9d\" (UID: \"b63d0010-bc67-416c-bdbd-cd9dc80b6c9d\") " Nov 22 11:01:11 crc kubenswrapper[4926]: I1122 11:01:11.884445 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/b63d0010-bc67-416c-bdbd-cd9dc80b6c9d-dns-swift-storage-0\") pod \"b63d0010-bc67-416c-bdbd-cd9dc80b6c9d\" (UID: \"b63d0010-bc67-416c-bdbd-cd9dc80b6c9d\") " Nov 22 11:01:11 crc kubenswrapper[4926]: I1122 11:01:11.890178 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b63d0010-bc67-416c-bdbd-cd9dc80b6c9d-kube-api-access-fhlpr" (OuterVolumeSpecName: "kube-api-access-fhlpr") pod "b63d0010-bc67-416c-bdbd-cd9dc80b6c9d" (UID: "b63d0010-bc67-416c-bdbd-cd9dc80b6c9d"). InnerVolumeSpecName "kube-api-access-fhlpr". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 11:01:11 crc kubenswrapper[4926]: I1122 11:01:11.939029 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b63d0010-bc67-416c-bdbd-cd9dc80b6c9d-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "b63d0010-bc67-416c-bdbd-cd9dc80b6c9d" (UID: "b63d0010-bc67-416c-bdbd-cd9dc80b6c9d"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 11:01:11 crc kubenswrapper[4926]: I1122 11:01:11.942386 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b63d0010-bc67-416c-bdbd-cd9dc80b6c9d-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "b63d0010-bc67-416c-bdbd-cd9dc80b6c9d" (UID: "b63d0010-bc67-416c-bdbd-cd9dc80b6c9d"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 11:01:11 crc kubenswrapper[4926]: I1122 11:01:11.942773 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b63d0010-bc67-416c-bdbd-cd9dc80b6c9d-config" (OuterVolumeSpecName: "config") pod "b63d0010-bc67-416c-bdbd-cd9dc80b6c9d" (UID: "b63d0010-bc67-416c-bdbd-cd9dc80b6c9d"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 11:01:11 crc kubenswrapper[4926]: I1122 11:01:11.943549 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b63d0010-bc67-416c-bdbd-cd9dc80b6c9d-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "b63d0010-bc67-416c-bdbd-cd9dc80b6c9d" (UID: "b63d0010-bc67-416c-bdbd-cd9dc80b6c9d"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 11:01:11 crc kubenswrapper[4926]: I1122 11:01:11.946005 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b63d0010-bc67-416c-bdbd-cd9dc80b6c9d-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "b63d0010-bc67-416c-bdbd-cd9dc80b6c9d" (UID: "b63d0010-bc67-416c-bdbd-cd9dc80b6c9d"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 11:01:11 crc kubenswrapper[4926]: I1122 11:01:11.954403 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b63d0010-bc67-416c-bdbd-cd9dc80b6c9d-openstack-edpm-ipam" (OuterVolumeSpecName: "openstack-edpm-ipam") pod "b63d0010-bc67-416c-bdbd-cd9dc80b6c9d" (UID: "b63d0010-bc67-416c-bdbd-cd9dc80b6c9d"). InnerVolumeSpecName "openstack-edpm-ipam". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 11:01:11 crc kubenswrapper[4926]: I1122 11:01:11.986651 4926 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/b63d0010-bc67-416c-bdbd-cd9dc80b6c9d-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 22 11:01:11 crc kubenswrapper[4926]: I1122 11:01:11.986690 4926 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/b63d0010-bc67-416c-bdbd-cd9dc80b6c9d-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 22 11:01:11 crc kubenswrapper[4926]: I1122 11:01:11.986706 4926 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b63d0010-bc67-416c-bdbd-cd9dc80b6c9d-config\") on node \"crc\" DevicePath \"\"" Nov 22 11:01:11 crc kubenswrapper[4926]: I1122 11:01:11.986716 4926 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/b63d0010-bc67-416c-bdbd-cd9dc80b6c9d-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 22 11:01:11 crc kubenswrapper[4926]: I1122 11:01:11.986727 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fhlpr\" (UniqueName: \"kubernetes.io/projected/b63d0010-bc67-416c-bdbd-cd9dc80b6c9d-kube-api-access-fhlpr\") on node \"crc\" DevicePath \"\"" Nov 22 11:01:11 crc kubenswrapper[4926]: I1122 11:01:11.986738 4926 reconciler_common.go:293] "Volume detached for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/b63d0010-bc67-416c-bdbd-cd9dc80b6c9d-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Nov 22 11:01:11 crc kubenswrapper[4926]: I1122 11:01:11.986748 4926 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/b63d0010-bc67-416c-bdbd-cd9dc80b6c9d-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Nov 22 11:01:12 crc kubenswrapper[4926]: I1122 11:01:12.338467 4926 generic.go:334] "Generic (PLEG): container finished" podID="b63d0010-bc67-416c-bdbd-cd9dc80b6c9d" containerID="508b9aa1debea837dfad28399a34172c9919298091b4a8fabd714977fa96e322" exitCode=0 Nov 22 11:01:12 crc kubenswrapper[4926]: I1122 11:01:12.338524 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5576978c7c-qvf5n" event={"ID":"b63d0010-bc67-416c-bdbd-cd9dc80b6c9d","Type":"ContainerDied","Data":"508b9aa1debea837dfad28399a34172c9919298091b4a8fabd714977fa96e322"} Nov 22 11:01:12 crc kubenswrapper[4926]: I1122 11:01:12.338821 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5576978c7c-qvf5n" event={"ID":"b63d0010-bc67-416c-bdbd-cd9dc80b6c9d","Type":"ContainerDied","Data":"e1d982030cc68c675b2286622db145a4a823646b57ffa325a8c69d616abebd8f"} Nov 22 11:01:12 crc kubenswrapper[4926]: I1122 11:01:12.338847 4926 scope.go:117] "RemoveContainer" containerID="508b9aa1debea837dfad28399a34172c9919298091b4a8fabd714977fa96e322" Nov 22 11:01:12 crc kubenswrapper[4926]: I1122 11:01:12.338573 4926 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5576978c7c-qvf5n" Nov 22 11:01:12 crc kubenswrapper[4926]: I1122 11:01:12.371132 4926 scope.go:117] "RemoveContainer" containerID="9348d90ad999e7d40eb47b23cc5b987129f69f7aba2aea57c0e13b27d3337e98" Nov 22 11:01:12 crc kubenswrapper[4926]: I1122 11:01:12.372870 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5576978c7c-qvf5n"] Nov 22 11:01:12 crc kubenswrapper[4926]: I1122 11:01:12.391066 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-5576978c7c-qvf5n"] Nov 22 11:01:12 crc kubenswrapper[4926]: I1122 11:01:12.415968 4926 scope.go:117] "RemoveContainer" containerID="508b9aa1debea837dfad28399a34172c9919298091b4a8fabd714977fa96e322" Nov 22 11:01:12 crc kubenswrapper[4926]: E1122 11:01:12.416420 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"508b9aa1debea837dfad28399a34172c9919298091b4a8fabd714977fa96e322\": container with ID starting with 508b9aa1debea837dfad28399a34172c9919298091b4a8fabd714977fa96e322 not found: ID does not exist" containerID="508b9aa1debea837dfad28399a34172c9919298091b4a8fabd714977fa96e322" Nov 22 11:01:12 crc kubenswrapper[4926]: I1122 11:01:12.416453 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"508b9aa1debea837dfad28399a34172c9919298091b4a8fabd714977fa96e322"} err="failed to get container status \"508b9aa1debea837dfad28399a34172c9919298091b4a8fabd714977fa96e322\": rpc error: code = NotFound desc = could not find container \"508b9aa1debea837dfad28399a34172c9919298091b4a8fabd714977fa96e322\": container with ID starting with 508b9aa1debea837dfad28399a34172c9919298091b4a8fabd714977fa96e322 not found: ID does not exist" Nov 22 11:01:12 crc kubenswrapper[4926]: I1122 11:01:12.416501 4926 scope.go:117] "RemoveContainer" containerID="9348d90ad999e7d40eb47b23cc5b987129f69f7aba2aea57c0e13b27d3337e98" Nov 22 11:01:12 crc kubenswrapper[4926]: E1122 11:01:12.416761 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9348d90ad999e7d40eb47b23cc5b987129f69f7aba2aea57c0e13b27d3337e98\": container with ID starting with 9348d90ad999e7d40eb47b23cc5b987129f69f7aba2aea57c0e13b27d3337e98 not found: ID does not exist" containerID="9348d90ad999e7d40eb47b23cc5b987129f69f7aba2aea57c0e13b27d3337e98" Nov 22 11:01:12 crc kubenswrapper[4926]: I1122 11:01:12.416786 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9348d90ad999e7d40eb47b23cc5b987129f69f7aba2aea57c0e13b27d3337e98"} err="failed to get container status \"9348d90ad999e7d40eb47b23cc5b987129f69f7aba2aea57c0e13b27d3337e98\": rpc error: code = NotFound desc = could not find container \"9348d90ad999e7d40eb47b23cc5b987129f69f7aba2aea57c0e13b27d3337e98\": container with ID starting with 9348d90ad999e7d40eb47b23cc5b987129f69f7aba2aea57c0e13b27d3337e98 not found: ID does not exist" Nov 22 11:01:12 crc kubenswrapper[4926]: I1122 11:01:12.596610 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b63d0010-bc67-416c-bdbd-cd9dc80b6c9d" path="/var/lib/kubelet/pods/b63d0010-bc67-416c-bdbd-cd9dc80b6c9d/volumes" Nov 22 11:01:19 crc kubenswrapper[4926]: I1122 11:01:19.893074 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-8gt7p"] Nov 22 11:01:19 crc kubenswrapper[4926]: E1122 11:01:19.893961 4926 
cpu_manager.go:410] "RemoveStaleState: removing container" podUID="13ac548d-31ed-4cec-b356-ce3ae008af91" containerName="dnsmasq-dns" Nov 22 11:01:19 crc kubenswrapper[4926]: I1122 11:01:19.893972 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="13ac548d-31ed-4cec-b356-ce3ae008af91" containerName="dnsmasq-dns" Nov 22 11:01:19 crc kubenswrapper[4926]: E1122 11:01:19.893981 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b63d0010-bc67-416c-bdbd-cd9dc80b6c9d" containerName="dnsmasq-dns" Nov 22 11:01:19 crc kubenswrapper[4926]: I1122 11:01:19.893988 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="b63d0010-bc67-416c-bdbd-cd9dc80b6c9d" containerName="dnsmasq-dns" Nov 22 11:01:19 crc kubenswrapper[4926]: E1122 11:01:19.894010 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="13ac548d-31ed-4cec-b356-ce3ae008af91" containerName="init" Nov 22 11:01:19 crc kubenswrapper[4926]: I1122 11:01:19.894016 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="13ac548d-31ed-4cec-b356-ce3ae008af91" containerName="init" Nov 22 11:01:19 crc kubenswrapper[4926]: E1122 11:01:19.894027 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b63d0010-bc67-416c-bdbd-cd9dc80b6c9d" containerName="init" Nov 22 11:01:19 crc kubenswrapper[4926]: I1122 11:01:19.894032 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="b63d0010-bc67-416c-bdbd-cd9dc80b6c9d" containerName="init" Nov 22 11:01:19 crc kubenswrapper[4926]: E1122 11:01:19.894055 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a8024291-de1f-49c8-bac5-b4d37978639d" containerName="keystone-cron" Nov 22 11:01:19 crc kubenswrapper[4926]: I1122 11:01:19.894063 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="a8024291-de1f-49c8-bac5-b4d37978639d" containerName="keystone-cron" Nov 22 11:01:19 crc kubenswrapper[4926]: I1122 11:01:19.894256 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="b63d0010-bc67-416c-bdbd-cd9dc80b6c9d" containerName="dnsmasq-dns" Nov 22 11:01:19 crc kubenswrapper[4926]: I1122 11:01:19.894277 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="a8024291-de1f-49c8-bac5-b4d37978639d" containerName="keystone-cron" Nov 22 11:01:19 crc kubenswrapper[4926]: I1122 11:01:19.894293 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="13ac548d-31ed-4cec-b356-ce3ae008af91" containerName="dnsmasq-dns" Nov 22 11:01:19 crc kubenswrapper[4926]: I1122 11:01:19.895518 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-8gt7p" Nov 22 11:01:19 crc kubenswrapper[4926]: I1122 11:01:19.897568 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 22 11:01:19 crc kubenswrapper[4926]: I1122 11:01:19.898368 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-lscnm" Nov 22 11:01:19 crc kubenswrapper[4926]: I1122 11:01:19.898412 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 22 11:01:19 crc kubenswrapper[4926]: I1122 11:01:19.898362 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 22 11:01:19 crc kubenswrapper[4926]: I1122 11:01:19.921606 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-8gt7p"] Nov 22 11:01:19 crc kubenswrapper[4926]: I1122 11:01:19.931245 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-d2gsf\" (UniqueName: \"kubernetes.io/projected/9c3831ca-e426-4b08-ad83-050cbedbd547-kube-api-access-d2gsf\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-8gt7p\" (UID: \"9c3831ca-e426-4b08-ad83-050cbedbd547\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-8gt7p" Nov 22 11:01:19 crc kubenswrapper[4926]: I1122 11:01:19.931310 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9c3831ca-e426-4b08-ad83-050cbedbd547-repo-setup-combined-ca-bundle\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-8gt7p\" (UID: \"9c3831ca-e426-4b08-ad83-050cbedbd547\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-8gt7p" Nov 22 11:01:19 crc kubenswrapper[4926]: I1122 11:01:19.931429 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/9c3831ca-e426-4b08-ad83-050cbedbd547-ssh-key\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-8gt7p\" (UID: \"9c3831ca-e426-4b08-ad83-050cbedbd547\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-8gt7p" Nov 22 11:01:19 crc kubenswrapper[4926]: I1122 11:01:19.931664 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/9c3831ca-e426-4b08-ad83-050cbedbd547-inventory\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-8gt7p\" (UID: \"9c3831ca-e426-4b08-ad83-050cbedbd547\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-8gt7p" Nov 22 11:01:20 crc kubenswrapper[4926]: I1122 11:01:20.033681 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/9c3831ca-e426-4b08-ad83-050cbedbd547-inventory\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-8gt7p\" (UID: \"9c3831ca-e426-4b08-ad83-050cbedbd547\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-8gt7p" Nov 22 11:01:20 crc kubenswrapper[4926]: I1122 11:01:20.033776 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-d2gsf\" (UniqueName: \"kubernetes.io/projected/9c3831ca-e426-4b08-ad83-050cbedbd547-kube-api-access-d2gsf\") pod 
\"repo-setup-edpm-deployment-openstack-edpm-ipam-8gt7p\" (UID: \"9c3831ca-e426-4b08-ad83-050cbedbd547\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-8gt7p" Nov 22 11:01:20 crc kubenswrapper[4926]: I1122 11:01:20.033818 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9c3831ca-e426-4b08-ad83-050cbedbd547-repo-setup-combined-ca-bundle\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-8gt7p\" (UID: \"9c3831ca-e426-4b08-ad83-050cbedbd547\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-8gt7p" Nov 22 11:01:20 crc kubenswrapper[4926]: I1122 11:01:20.033942 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/9c3831ca-e426-4b08-ad83-050cbedbd547-ssh-key\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-8gt7p\" (UID: \"9c3831ca-e426-4b08-ad83-050cbedbd547\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-8gt7p" Nov 22 11:01:20 crc kubenswrapper[4926]: I1122 11:01:20.039840 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9c3831ca-e426-4b08-ad83-050cbedbd547-repo-setup-combined-ca-bundle\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-8gt7p\" (UID: \"9c3831ca-e426-4b08-ad83-050cbedbd547\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-8gt7p" Nov 22 11:01:20 crc kubenswrapper[4926]: I1122 11:01:20.040614 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/9c3831ca-e426-4b08-ad83-050cbedbd547-inventory\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-8gt7p\" (UID: \"9c3831ca-e426-4b08-ad83-050cbedbd547\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-8gt7p" Nov 22 11:01:20 crc kubenswrapper[4926]: I1122 11:01:20.041785 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/9c3831ca-e426-4b08-ad83-050cbedbd547-ssh-key\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-8gt7p\" (UID: \"9c3831ca-e426-4b08-ad83-050cbedbd547\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-8gt7p" Nov 22 11:01:20 crc kubenswrapper[4926]: I1122 11:01:20.066653 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-d2gsf\" (UniqueName: \"kubernetes.io/projected/9c3831ca-e426-4b08-ad83-050cbedbd547-kube-api-access-d2gsf\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-8gt7p\" (UID: \"9c3831ca-e426-4b08-ad83-050cbedbd547\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-8gt7p" Nov 22 11:01:20 crc kubenswrapper[4926]: I1122 11:01:20.217703 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-8gt7p" Nov 22 11:01:21 crc kubenswrapper[4926]: I1122 11:01:21.271525 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-8gt7p"] Nov 22 11:01:21 crc kubenswrapper[4926]: I1122 11:01:21.275684 4926 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 22 11:01:21 crc kubenswrapper[4926]: I1122 11:01:21.417853 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-8gt7p" event={"ID":"9c3831ca-e426-4b08-ad83-050cbedbd547","Type":"ContainerStarted","Data":"5bc6d927ad08d2ff9b9f8910d5de3229c545ffec12a713ed7b7bb8357ec60d27"} Nov 22 11:01:24 crc kubenswrapper[4926]: I1122 11:01:24.454410 4926 generic.go:334] "Generic (PLEG): container finished" podID="9bcfa04c-3c9e-47a5-946e-d7c42d3cefda" containerID="23b663ee825578ffe1cf44d00e1122247140b7ddc2c06846baf00bd9bd32ede3" exitCode=0 Nov 22 11:01:24 crc kubenswrapper[4926]: I1122 11:01:24.454532 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"9bcfa04c-3c9e-47a5-946e-d7c42d3cefda","Type":"ContainerDied","Data":"23b663ee825578ffe1cf44d00e1122247140b7ddc2c06846baf00bd9bd32ede3"} Nov 22 11:01:25 crc kubenswrapper[4926]: I1122 11:01:25.469410 4926 generic.go:334] "Generic (PLEG): container finished" podID="5bd13931-4b28-4235-a779-aea2a515351e" containerID="21eb39d3d942eb0a15832d941ec08099f5211106ad5790d4d1182e36679f7a11" exitCode=0 Nov 22 11:01:25 crc kubenswrapper[4926]: I1122 11:01:25.469528 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"5bd13931-4b28-4235-a779-aea2a515351e","Type":"ContainerDied","Data":"21eb39d3d942eb0a15832d941ec08099f5211106ad5790d4d1182e36679f7a11"} Nov 22 11:01:29 crc kubenswrapper[4926]: I1122 11:01:29.513295 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"5bd13931-4b28-4235-a779-aea2a515351e","Type":"ContainerStarted","Data":"47cea88dfa7c04de17bbcaa8d83daa513895bee7d0cb973b431c7d4cd2d4839b"} Nov 22 11:01:29 crc kubenswrapper[4926]: I1122 11:01:29.514286 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-cell1-server-0" Nov 22 11:01:29 crc kubenswrapper[4926]: I1122 11:01:29.515255 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-8gt7p" event={"ID":"9c3831ca-e426-4b08-ad83-050cbedbd547","Type":"ContainerStarted","Data":"3d9b354e1d1a1af1b286c1982c537f8ed1e9dbecfe1c3bca86b1d9454ea8d874"} Nov 22 11:01:29 crc kubenswrapper[4926]: I1122 11:01:29.519183 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"9bcfa04c-3c9e-47a5-946e-d7c42d3cefda","Type":"ContainerStarted","Data":"4ada1cfdced85c6c083df9a3442c5eb5e7c4153f7f114cee4d99f4faa31f108e"} Nov 22 11:01:29 crc kubenswrapper[4926]: I1122 11:01:29.519986 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-server-0" Nov 22 11:01:29 crc kubenswrapper[4926]: I1122 11:01:29.551823 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-cell1-server-0" podStartSLOduration=39.551800606 podStartE2EDuration="39.551800606s" podCreationTimestamp="2025-11-22 11:00:50 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" 
lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 11:01:29.536537008 +0000 UTC m=+1309.838142305" watchObservedRunningTime="2025-11-22 11:01:29.551800606 +0000 UTC m=+1309.853405893" Nov 22 11:01:29 crc kubenswrapper[4926]: I1122 11:01:29.572424 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-8gt7p" podStartSLOduration=2.874025273 podStartE2EDuration="10.572405598s" podCreationTimestamp="2025-11-22 11:01:19 +0000 UTC" firstStartedPulling="2025-11-22 11:01:21.275473982 +0000 UTC m=+1301.577079269" lastFinishedPulling="2025-11-22 11:01:28.973854267 +0000 UTC m=+1309.275459594" observedRunningTime="2025-11-22 11:01:29.569393351 +0000 UTC m=+1309.870998638" watchObservedRunningTime="2025-11-22 11:01:29.572405598 +0000 UTC m=+1309.874010885" Nov 22 11:01:39 crc kubenswrapper[4926]: I1122 11:01:39.600083 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-server-0" Nov 22 11:01:39 crc kubenswrapper[4926]: I1122 11:01:39.623872 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-server-0" podStartSLOduration=50.623854422 podStartE2EDuration="50.623854422s" podCreationTimestamp="2025-11-22 11:00:49 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 11:01:29.604930811 +0000 UTC m=+1309.906536108" watchObservedRunningTime="2025-11-22 11:01:39.623854422 +0000 UTC m=+1319.925459709" Nov 22 11:01:40 crc kubenswrapper[4926]: I1122 11:01:40.628637 4926 generic.go:334] "Generic (PLEG): container finished" podID="9c3831ca-e426-4b08-ad83-050cbedbd547" containerID="3d9b354e1d1a1af1b286c1982c537f8ed1e9dbecfe1c3bca86b1d9454ea8d874" exitCode=0 Nov 22 11:01:40 crc kubenswrapper[4926]: I1122 11:01:40.628692 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-8gt7p" event={"ID":"9c3831ca-e426-4b08-ad83-050cbedbd547","Type":"ContainerDied","Data":"3d9b354e1d1a1af1b286c1982c537f8ed1e9dbecfe1c3bca86b1d9454ea8d874"} Nov 22 11:01:40 crc kubenswrapper[4926]: I1122 11:01:40.710250 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-cell1-server-0" Nov 22 11:01:42 crc kubenswrapper[4926]: I1122 11:01:42.060781 4926 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-8gt7p" Nov 22 11:01:42 crc kubenswrapper[4926]: I1122 11:01:42.167187 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d2gsf\" (UniqueName: \"kubernetes.io/projected/9c3831ca-e426-4b08-ad83-050cbedbd547-kube-api-access-d2gsf\") pod \"9c3831ca-e426-4b08-ad83-050cbedbd547\" (UID: \"9c3831ca-e426-4b08-ad83-050cbedbd547\") " Nov 22 11:01:42 crc kubenswrapper[4926]: I1122 11:01:42.167532 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/9c3831ca-e426-4b08-ad83-050cbedbd547-ssh-key\") pod \"9c3831ca-e426-4b08-ad83-050cbedbd547\" (UID: \"9c3831ca-e426-4b08-ad83-050cbedbd547\") " Nov 22 11:01:42 crc kubenswrapper[4926]: I1122 11:01:42.167659 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/9c3831ca-e426-4b08-ad83-050cbedbd547-inventory\") pod \"9c3831ca-e426-4b08-ad83-050cbedbd547\" (UID: \"9c3831ca-e426-4b08-ad83-050cbedbd547\") " Nov 22 11:01:42 crc kubenswrapper[4926]: I1122 11:01:42.167731 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9c3831ca-e426-4b08-ad83-050cbedbd547-repo-setup-combined-ca-bundle\") pod \"9c3831ca-e426-4b08-ad83-050cbedbd547\" (UID: \"9c3831ca-e426-4b08-ad83-050cbedbd547\") " Nov 22 11:01:42 crc kubenswrapper[4926]: I1122 11:01:42.176220 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9c3831ca-e426-4b08-ad83-050cbedbd547-repo-setup-combined-ca-bundle" (OuterVolumeSpecName: "repo-setup-combined-ca-bundle") pod "9c3831ca-e426-4b08-ad83-050cbedbd547" (UID: "9c3831ca-e426-4b08-ad83-050cbedbd547"). InnerVolumeSpecName "repo-setup-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 11:01:42 crc kubenswrapper[4926]: I1122 11:01:42.176270 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9c3831ca-e426-4b08-ad83-050cbedbd547-kube-api-access-d2gsf" (OuterVolumeSpecName: "kube-api-access-d2gsf") pod "9c3831ca-e426-4b08-ad83-050cbedbd547" (UID: "9c3831ca-e426-4b08-ad83-050cbedbd547"). InnerVolumeSpecName "kube-api-access-d2gsf". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 11:01:42 crc kubenswrapper[4926]: I1122 11:01:42.199549 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9c3831ca-e426-4b08-ad83-050cbedbd547-inventory" (OuterVolumeSpecName: "inventory") pod "9c3831ca-e426-4b08-ad83-050cbedbd547" (UID: "9c3831ca-e426-4b08-ad83-050cbedbd547"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 11:01:42 crc kubenswrapper[4926]: I1122 11:01:42.213356 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9c3831ca-e426-4b08-ad83-050cbedbd547-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "9c3831ca-e426-4b08-ad83-050cbedbd547" (UID: "9c3831ca-e426-4b08-ad83-050cbedbd547"). InnerVolumeSpecName "ssh-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 11:01:42 crc kubenswrapper[4926]: I1122 11:01:42.270149 4926 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/9c3831ca-e426-4b08-ad83-050cbedbd547-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 22 11:01:42 crc kubenswrapper[4926]: I1122 11:01:42.270184 4926 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/9c3831ca-e426-4b08-ad83-050cbedbd547-inventory\") on node \"crc\" DevicePath \"\"" Nov 22 11:01:42 crc kubenswrapper[4926]: I1122 11:01:42.270197 4926 reconciler_common.go:293] "Volume detached for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9c3831ca-e426-4b08-ad83-050cbedbd547-repo-setup-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 22 11:01:42 crc kubenswrapper[4926]: I1122 11:01:42.270209 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d2gsf\" (UniqueName: \"kubernetes.io/projected/9c3831ca-e426-4b08-ad83-050cbedbd547-kube-api-access-d2gsf\") on node \"crc\" DevicePath \"\"" Nov 22 11:01:42 crc kubenswrapper[4926]: I1122 11:01:42.650329 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-8gt7p" event={"ID":"9c3831ca-e426-4b08-ad83-050cbedbd547","Type":"ContainerDied","Data":"5bc6d927ad08d2ff9b9f8910d5de3229c545ffec12a713ed7b7bb8357ec60d27"} Nov 22 11:01:42 crc kubenswrapper[4926]: I1122 11:01:42.650379 4926 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="5bc6d927ad08d2ff9b9f8910d5de3229c545ffec12a713ed7b7bb8357ec60d27" Nov 22 11:01:42 crc kubenswrapper[4926]: I1122 11:01:42.650417 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-8gt7p" Nov 22 11:01:42 crc kubenswrapper[4926]: I1122 11:01:42.721210 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/redhat-edpm-deployment-openstack-edpm-ipam-k2x5v"] Nov 22 11:01:42 crc kubenswrapper[4926]: E1122 11:01:42.721985 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9c3831ca-e426-4b08-ad83-050cbedbd547" containerName="repo-setup-edpm-deployment-openstack-edpm-ipam" Nov 22 11:01:42 crc kubenswrapper[4926]: I1122 11:01:42.722014 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="9c3831ca-e426-4b08-ad83-050cbedbd547" containerName="repo-setup-edpm-deployment-openstack-edpm-ipam" Nov 22 11:01:42 crc kubenswrapper[4926]: I1122 11:01:42.722288 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="9c3831ca-e426-4b08-ad83-050cbedbd547" containerName="repo-setup-edpm-deployment-openstack-edpm-ipam" Nov 22 11:01:42 crc kubenswrapper[4926]: I1122 11:01:42.723366 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-k2x5v" Nov 22 11:01:42 crc kubenswrapper[4926]: I1122 11:01:42.726088 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 22 11:01:42 crc kubenswrapper[4926]: I1122 11:01:42.726308 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-lscnm" Nov 22 11:01:42 crc kubenswrapper[4926]: I1122 11:01:42.726576 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 22 11:01:42 crc kubenswrapper[4926]: I1122 11:01:42.726801 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 22 11:01:42 crc kubenswrapper[4926]: I1122 11:01:42.733198 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/redhat-edpm-deployment-openstack-edpm-ipam-k2x5v"] Nov 22 11:01:42 crc kubenswrapper[4926]: I1122 11:01:42.887292 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/5241dfa6-bfdd-495c-8853-135648e0c112-ssh-key\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-k2x5v\" (UID: \"5241dfa6-bfdd-495c-8853-135648e0c112\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-k2x5v" Nov 22 11:01:42 crc kubenswrapper[4926]: I1122 11:01:42.887597 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-d9pjm\" (UniqueName: \"kubernetes.io/projected/5241dfa6-bfdd-495c-8853-135648e0c112-kube-api-access-d9pjm\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-k2x5v\" (UID: \"5241dfa6-bfdd-495c-8853-135648e0c112\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-k2x5v" Nov 22 11:01:42 crc kubenswrapper[4926]: I1122 11:01:42.887634 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/5241dfa6-bfdd-495c-8853-135648e0c112-inventory\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-k2x5v\" (UID: \"5241dfa6-bfdd-495c-8853-135648e0c112\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-k2x5v" Nov 22 11:01:42 crc kubenswrapper[4926]: I1122 11:01:42.989166 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/5241dfa6-bfdd-495c-8853-135648e0c112-ssh-key\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-k2x5v\" (UID: \"5241dfa6-bfdd-495c-8853-135648e0c112\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-k2x5v" Nov 22 11:01:42 crc kubenswrapper[4926]: I1122 11:01:42.989332 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-d9pjm\" (UniqueName: \"kubernetes.io/projected/5241dfa6-bfdd-495c-8853-135648e0c112-kube-api-access-d9pjm\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-k2x5v\" (UID: \"5241dfa6-bfdd-495c-8853-135648e0c112\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-k2x5v" Nov 22 11:01:42 crc kubenswrapper[4926]: I1122 11:01:42.989377 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/5241dfa6-bfdd-495c-8853-135648e0c112-inventory\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-k2x5v\" (UID: \"5241dfa6-bfdd-495c-8853-135648e0c112\") " 
pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-k2x5v" Nov 22 11:01:42 crc kubenswrapper[4926]: I1122 11:01:42.994168 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/5241dfa6-bfdd-495c-8853-135648e0c112-inventory\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-k2x5v\" (UID: \"5241dfa6-bfdd-495c-8853-135648e0c112\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-k2x5v" Nov 22 11:01:42 crc kubenswrapper[4926]: I1122 11:01:42.994484 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/5241dfa6-bfdd-495c-8853-135648e0c112-ssh-key\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-k2x5v\" (UID: \"5241dfa6-bfdd-495c-8853-135648e0c112\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-k2x5v" Nov 22 11:01:43 crc kubenswrapper[4926]: I1122 11:01:43.010854 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-d9pjm\" (UniqueName: \"kubernetes.io/projected/5241dfa6-bfdd-495c-8853-135648e0c112-kube-api-access-d9pjm\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-k2x5v\" (UID: \"5241dfa6-bfdd-495c-8853-135648e0c112\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-k2x5v" Nov 22 11:01:43 crc kubenswrapper[4926]: I1122 11:01:43.046438 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-k2x5v" Nov 22 11:01:43 crc kubenswrapper[4926]: W1122 11:01:43.581655 4926 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod5241dfa6_bfdd_495c_8853_135648e0c112.slice/crio-c251edbbf51e085fb0e35cc54ce77a2cab63a264d59db5cb5826da81b773781b WatchSource:0}: Error finding container c251edbbf51e085fb0e35cc54ce77a2cab63a264d59db5cb5826da81b773781b: Status 404 returned error can't find the container with id c251edbbf51e085fb0e35cc54ce77a2cab63a264d59db5cb5826da81b773781b Nov 22 11:01:43 crc kubenswrapper[4926]: I1122 11:01:43.589305 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/redhat-edpm-deployment-openstack-edpm-ipam-k2x5v"] Nov 22 11:01:43 crc kubenswrapper[4926]: I1122 11:01:43.660856 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-k2x5v" event={"ID":"5241dfa6-bfdd-495c-8853-135648e0c112","Type":"ContainerStarted","Data":"c251edbbf51e085fb0e35cc54ce77a2cab63a264d59db5cb5826da81b773781b"} Nov 22 11:01:44 crc kubenswrapper[4926]: I1122 11:01:44.674429 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-k2x5v" event={"ID":"5241dfa6-bfdd-495c-8853-135648e0c112","Type":"ContainerStarted","Data":"e71f72d2dde766536cc4d39088c708d057e45f78aa611f1a90b1a7c3f6756145"} Nov 22 11:01:44 crc kubenswrapper[4926]: I1122 11:01:44.703245 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-k2x5v" podStartSLOduration=2.313906991 podStartE2EDuration="2.703218675s" podCreationTimestamp="2025-11-22 11:01:42 +0000 UTC" firstStartedPulling="2025-11-22 11:01:43.585329598 +0000 UTC m=+1323.886934905" lastFinishedPulling="2025-11-22 11:01:43.974641292 +0000 UTC m=+1324.276246589" observedRunningTime="2025-11-22 11:01:44.692003543 +0000 UTC m=+1324.993608890" watchObservedRunningTime="2025-11-22 11:01:44.703218675 +0000 UTC 
m=+1325.004823972" Nov 22 11:01:47 crc kubenswrapper[4926]: I1122 11:01:47.705638 4926 generic.go:334] "Generic (PLEG): container finished" podID="5241dfa6-bfdd-495c-8853-135648e0c112" containerID="e71f72d2dde766536cc4d39088c708d057e45f78aa611f1a90b1a7c3f6756145" exitCode=0 Nov 22 11:01:47 crc kubenswrapper[4926]: I1122 11:01:47.705705 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-k2x5v" event={"ID":"5241dfa6-bfdd-495c-8853-135648e0c112","Type":"ContainerDied","Data":"e71f72d2dde766536cc4d39088c708d057e45f78aa611f1a90b1a7c3f6756145"} Nov 22 11:01:49 crc kubenswrapper[4926]: I1122 11:01:49.128819 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-k2x5v" Nov 22 11:01:49 crc kubenswrapper[4926]: I1122 11:01:49.204087 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d9pjm\" (UniqueName: \"kubernetes.io/projected/5241dfa6-bfdd-495c-8853-135648e0c112-kube-api-access-d9pjm\") pod \"5241dfa6-bfdd-495c-8853-135648e0c112\" (UID: \"5241dfa6-bfdd-495c-8853-135648e0c112\") " Nov 22 11:01:49 crc kubenswrapper[4926]: I1122 11:01:49.204288 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/5241dfa6-bfdd-495c-8853-135648e0c112-inventory\") pod \"5241dfa6-bfdd-495c-8853-135648e0c112\" (UID: \"5241dfa6-bfdd-495c-8853-135648e0c112\") " Nov 22 11:01:49 crc kubenswrapper[4926]: I1122 11:01:49.204417 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/5241dfa6-bfdd-495c-8853-135648e0c112-ssh-key\") pod \"5241dfa6-bfdd-495c-8853-135648e0c112\" (UID: \"5241dfa6-bfdd-495c-8853-135648e0c112\") " Nov 22 11:01:49 crc kubenswrapper[4926]: I1122 11:01:49.209308 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5241dfa6-bfdd-495c-8853-135648e0c112-kube-api-access-d9pjm" (OuterVolumeSpecName: "kube-api-access-d9pjm") pod "5241dfa6-bfdd-495c-8853-135648e0c112" (UID: "5241dfa6-bfdd-495c-8853-135648e0c112"). InnerVolumeSpecName "kube-api-access-d9pjm". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 11:01:49 crc kubenswrapper[4926]: I1122 11:01:49.230285 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5241dfa6-bfdd-495c-8853-135648e0c112-inventory" (OuterVolumeSpecName: "inventory") pod "5241dfa6-bfdd-495c-8853-135648e0c112" (UID: "5241dfa6-bfdd-495c-8853-135648e0c112"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 11:01:49 crc kubenswrapper[4926]: I1122 11:01:49.232621 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5241dfa6-bfdd-495c-8853-135648e0c112-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "5241dfa6-bfdd-495c-8853-135648e0c112" (UID: "5241dfa6-bfdd-495c-8853-135648e0c112"). InnerVolumeSpecName "ssh-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 11:01:49 crc kubenswrapper[4926]: I1122 11:01:49.306609 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d9pjm\" (UniqueName: \"kubernetes.io/projected/5241dfa6-bfdd-495c-8853-135648e0c112-kube-api-access-d9pjm\") on node \"crc\" DevicePath \"\"" Nov 22 11:01:49 crc kubenswrapper[4926]: I1122 11:01:49.306639 4926 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/5241dfa6-bfdd-495c-8853-135648e0c112-inventory\") on node \"crc\" DevicePath \"\"" Nov 22 11:01:49 crc kubenswrapper[4926]: I1122 11:01:49.306647 4926 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/5241dfa6-bfdd-495c-8853-135648e0c112-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 22 11:01:49 crc kubenswrapper[4926]: I1122 11:01:49.727420 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-k2x5v" event={"ID":"5241dfa6-bfdd-495c-8853-135648e0c112","Type":"ContainerDied","Data":"c251edbbf51e085fb0e35cc54ce77a2cab63a264d59db5cb5826da81b773781b"} Nov 22 11:01:49 crc kubenswrapper[4926]: I1122 11:01:49.727458 4926 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c251edbbf51e085fb0e35cc54ce77a2cab63a264d59db5cb5826da81b773781b" Nov 22 11:01:49 crc kubenswrapper[4926]: I1122 11:01:49.727463 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-k2x5v" Nov 22 11:01:49 crc kubenswrapper[4926]: I1122 11:01:49.799575 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-ch87n"] Nov 22 11:01:49 crc kubenswrapper[4926]: E1122 11:01:49.800035 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5241dfa6-bfdd-495c-8853-135648e0c112" containerName="redhat-edpm-deployment-openstack-edpm-ipam" Nov 22 11:01:49 crc kubenswrapper[4926]: I1122 11:01:49.800054 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="5241dfa6-bfdd-495c-8853-135648e0c112" containerName="redhat-edpm-deployment-openstack-edpm-ipam" Nov 22 11:01:49 crc kubenswrapper[4926]: I1122 11:01:49.800302 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="5241dfa6-bfdd-495c-8853-135648e0c112" containerName="redhat-edpm-deployment-openstack-edpm-ipam" Nov 22 11:01:49 crc kubenswrapper[4926]: I1122 11:01:49.825869 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-ch87n" Nov 22 11:01:49 crc kubenswrapper[4926]: I1122 11:01:49.829612 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 22 11:01:49 crc kubenswrapper[4926]: I1122 11:01:49.829991 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 22 11:01:49 crc kubenswrapper[4926]: I1122 11:01:49.830183 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 22 11:01:49 crc kubenswrapper[4926]: I1122 11:01:49.830403 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-lscnm" Nov 22 11:01:49 crc kubenswrapper[4926]: I1122 11:01:49.834076 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-ch87n"] Nov 22 11:01:49 crc kubenswrapper[4926]: I1122 11:01:49.919294 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9189297a-e5e2-47b3-9cf0-ac932c80f3bb-bootstrap-combined-ca-bundle\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-ch87n\" (UID: \"9189297a-e5e2-47b3-9cf0-ac932c80f3bb\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-ch87n" Nov 22 11:01:49 crc kubenswrapper[4926]: I1122 11:01:49.919373 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/9189297a-e5e2-47b3-9cf0-ac932c80f3bb-inventory\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-ch87n\" (UID: \"9189297a-e5e2-47b3-9cf0-ac932c80f3bb\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-ch87n" Nov 22 11:01:49 crc kubenswrapper[4926]: I1122 11:01:49.919435 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/9189297a-e5e2-47b3-9cf0-ac932c80f3bb-ssh-key\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-ch87n\" (UID: \"9189297a-e5e2-47b3-9cf0-ac932c80f3bb\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-ch87n" Nov 22 11:01:49 crc kubenswrapper[4926]: I1122 11:01:49.919480 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8lbzl\" (UniqueName: \"kubernetes.io/projected/9189297a-e5e2-47b3-9cf0-ac932c80f3bb-kube-api-access-8lbzl\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-ch87n\" (UID: \"9189297a-e5e2-47b3-9cf0-ac932c80f3bb\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-ch87n" Nov 22 11:01:50 crc kubenswrapper[4926]: I1122 11:01:50.021408 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8lbzl\" (UniqueName: \"kubernetes.io/projected/9189297a-e5e2-47b3-9cf0-ac932c80f3bb-kube-api-access-8lbzl\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-ch87n\" (UID: \"9189297a-e5e2-47b3-9cf0-ac932c80f3bb\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-ch87n" Nov 22 11:01:50 crc kubenswrapper[4926]: I1122 11:01:50.021572 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9189297a-e5e2-47b3-9cf0-ac932c80f3bb-bootstrap-combined-ca-bundle\") 
pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-ch87n\" (UID: \"9189297a-e5e2-47b3-9cf0-ac932c80f3bb\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-ch87n" Nov 22 11:01:50 crc kubenswrapper[4926]: I1122 11:01:50.021627 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/9189297a-e5e2-47b3-9cf0-ac932c80f3bb-inventory\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-ch87n\" (UID: \"9189297a-e5e2-47b3-9cf0-ac932c80f3bb\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-ch87n" Nov 22 11:01:50 crc kubenswrapper[4926]: I1122 11:01:50.021701 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/9189297a-e5e2-47b3-9cf0-ac932c80f3bb-ssh-key\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-ch87n\" (UID: \"9189297a-e5e2-47b3-9cf0-ac932c80f3bb\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-ch87n" Nov 22 11:01:50 crc kubenswrapper[4926]: I1122 11:01:50.026699 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/9189297a-e5e2-47b3-9cf0-ac932c80f3bb-ssh-key\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-ch87n\" (UID: \"9189297a-e5e2-47b3-9cf0-ac932c80f3bb\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-ch87n" Nov 22 11:01:50 crc kubenswrapper[4926]: I1122 11:01:50.026737 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/9189297a-e5e2-47b3-9cf0-ac932c80f3bb-inventory\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-ch87n\" (UID: \"9189297a-e5e2-47b3-9cf0-ac932c80f3bb\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-ch87n" Nov 22 11:01:50 crc kubenswrapper[4926]: I1122 11:01:50.028163 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9189297a-e5e2-47b3-9cf0-ac932c80f3bb-bootstrap-combined-ca-bundle\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-ch87n\" (UID: \"9189297a-e5e2-47b3-9cf0-ac932c80f3bb\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-ch87n" Nov 22 11:01:50 crc kubenswrapper[4926]: I1122 11:01:50.040322 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8lbzl\" (UniqueName: \"kubernetes.io/projected/9189297a-e5e2-47b3-9cf0-ac932c80f3bb-kube-api-access-8lbzl\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-ch87n\" (UID: \"9189297a-e5e2-47b3-9cf0-ac932c80f3bb\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-ch87n" Nov 22 11:01:50 crc kubenswrapper[4926]: I1122 11:01:50.143459 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-ch87n" Nov 22 11:01:50 crc kubenswrapper[4926]: I1122 11:01:50.720098 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-ch87n"] Nov 22 11:01:50 crc kubenswrapper[4926]: W1122 11:01:50.724355 4926 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod9189297a_e5e2_47b3_9cf0_ac932c80f3bb.slice/crio-d9f259c4332afd785189d0a786ecd9db31ebc1718206238c1df945976127a451 WatchSource:0}: Error finding container d9f259c4332afd785189d0a786ecd9db31ebc1718206238c1df945976127a451: Status 404 returned error can't find the container with id d9f259c4332afd785189d0a786ecd9db31ebc1718206238c1df945976127a451 Nov 22 11:01:50 crc kubenswrapper[4926]: I1122 11:01:50.737734 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-ch87n" event={"ID":"9189297a-e5e2-47b3-9cf0-ac932c80f3bb","Type":"ContainerStarted","Data":"d9f259c4332afd785189d0a786ecd9db31ebc1718206238c1df945976127a451"} Nov 22 11:01:51 crc kubenswrapper[4926]: I1122 11:01:51.748431 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-ch87n" event={"ID":"9189297a-e5e2-47b3-9cf0-ac932c80f3bb","Type":"ContainerStarted","Data":"c346fc1a007af843c6ad3b0a4624e4d310e0d47de32ca65bcd3e65227715a2e6"} Nov 22 11:01:51 crc kubenswrapper[4926]: I1122 11:01:51.762261 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-ch87n" podStartSLOduration=2.245305581 podStartE2EDuration="2.762241709s" podCreationTimestamp="2025-11-22 11:01:49 +0000 UTC" firstStartedPulling="2025-11-22 11:01:50.727342384 +0000 UTC m=+1331.028947671" lastFinishedPulling="2025-11-22 11:01:51.244278492 +0000 UTC m=+1331.545883799" observedRunningTime="2025-11-22 11:01:51.761376344 +0000 UTC m=+1332.062981631" watchObservedRunningTime="2025-11-22 11:01:51.762241709 +0000 UTC m=+1332.063847006" Nov 22 11:02:09 crc kubenswrapper[4926]: I1122 11:02:09.660749 4926 patch_prober.go:28] interesting pod/machine-config-daemon-xr9nd container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 22 11:02:09 crc kubenswrapper[4926]: I1122 11:02:09.661408 4926 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" podUID="d4977b14-85c3-4141-9b15-1768f09e8d27" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 22 11:02:19 crc kubenswrapper[4926]: I1122 11:02:19.581828 4926 scope.go:117] "RemoveContainer" containerID="4dff75ddd520308ecb18884a161069763659fcb0c42f8df3481c373a2a0addf3" Nov 22 11:02:19 crc kubenswrapper[4926]: I1122 11:02:19.608703 4926 scope.go:117] "RemoveContainer" containerID="5e857642e9a0242c896faab7453d83e3c726aee2afa9204dea594776ca0877b4" Nov 22 11:02:39 crc kubenswrapper[4926]: I1122 11:02:39.661429 4926 patch_prober.go:28] interesting pod/machine-config-daemon-xr9nd container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 
127.0.0.1:8798: connect: connection refused" start-of-body= Nov 22 11:02:39 crc kubenswrapper[4926]: I1122 11:02:39.662022 4926 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" podUID="d4977b14-85c3-4141-9b15-1768f09e8d27" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 22 11:03:09 crc kubenswrapper[4926]: I1122 11:03:09.661743 4926 patch_prober.go:28] interesting pod/machine-config-daemon-xr9nd container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 22 11:03:09 crc kubenswrapper[4926]: I1122 11:03:09.664101 4926 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" podUID="d4977b14-85c3-4141-9b15-1768f09e8d27" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 22 11:03:09 crc kubenswrapper[4926]: I1122 11:03:09.664375 4926 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" Nov 22 11:03:09 crc kubenswrapper[4926]: I1122 11:03:09.665811 4926 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"48831bc9d5675e008344df0cb1df4d3942248ba162e33c17e856f93d5faa1d3d"} pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 22 11:03:09 crc kubenswrapper[4926]: I1122 11:03:09.666332 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" podUID="d4977b14-85c3-4141-9b15-1768f09e8d27" containerName="machine-config-daemon" containerID="cri-o://48831bc9d5675e008344df0cb1df4d3942248ba162e33c17e856f93d5faa1d3d" gracePeriod=600 Nov 22 11:03:10 crc kubenswrapper[4926]: I1122 11:03:10.522009 4926 generic.go:334] "Generic (PLEG): container finished" podID="d4977b14-85c3-4141-9b15-1768f09e8d27" containerID="48831bc9d5675e008344df0cb1df4d3942248ba162e33c17e856f93d5faa1d3d" exitCode=0 Nov 22 11:03:10 crc kubenswrapper[4926]: I1122 11:03:10.522095 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" event={"ID":"d4977b14-85c3-4141-9b15-1768f09e8d27","Type":"ContainerDied","Data":"48831bc9d5675e008344df0cb1df4d3942248ba162e33c17e856f93d5faa1d3d"} Nov 22 11:03:10 crc kubenswrapper[4926]: I1122 11:03:10.522341 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" event={"ID":"d4977b14-85c3-4141-9b15-1768f09e8d27","Type":"ContainerStarted","Data":"e38f3f8166b5990b08ef0202b8570c03dbfd957e5057988fdcd93fcb0d15ee5f"} Nov 22 11:03:10 crc kubenswrapper[4926]: I1122 11:03:10.522367 4926 scope.go:117] "RemoveContainer" containerID="15cec4426d1f6906f001420dd32a2e3b60079ed2bd3dc4ce7916ceddb9716375" Nov 22 11:03:19 crc kubenswrapper[4926]: I1122 11:03:19.687301 4926 scope.go:117] "RemoveContainer" containerID="ef7fb645788760bfd3a10062b152cbea3873c21b8f43f1d9df41e595f2b29d3c" Nov 22 11:03:19 crc 
kubenswrapper[4926]: I1122 11:03:19.722091 4926 scope.go:117] "RemoveContainer" containerID="640e770632dc635f94dd5c8ee7a65feacae8118fabc7a4ad702ba4491add1bc9" Nov 22 11:03:19 crc kubenswrapper[4926]: I1122 11:03:19.769441 4926 scope.go:117] "RemoveContainer" containerID="f9e5017f7f19083529fb2fa8015c6f96f24811a8201e239da7bf1a8e43410573" Nov 22 11:03:19 crc kubenswrapper[4926]: I1122 11:03:19.793499 4926 scope.go:117] "RemoveContainer" containerID="edd9a0bdf0930891bdbccf0494ad4f3e4d34a7bb60cc4ecd321c0098b12cf0d3" Nov 22 11:03:19 crc kubenswrapper[4926]: I1122 11:03:19.838134 4926 scope.go:117] "RemoveContainer" containerID="7ccfdfa250d4b39504002e66aedc68e1d82b49c23654f6ab308d91c16a34e47f" Nov 22 11:04:02 crc kubenswrapper[4926]: I1122 11:04:02.016085 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-lxbmp"] Nov 22 11:04:02 crc kubenswrapper[4926]: I1122 11:04:02.020107 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-lxbmp" Nov 22 11:04:02 crc kubenswrapper[4926]: I1122 11:04:02.032670 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-lxbmp"] Nov 22 11:04:02 crc kubenswrapper[4926]: I1122 11:04:02.145853 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2dclj\" (UniqueName: \"kubernetes.io/projected/1c1e2328-b247-4691-b904-37fe4fbef883-kube-api-access-2dclj\") pod \"redhat-operators-lxbmp\" (UID: \"1c1e2328-b247-4691-b904-37fe4fbef883\") " pod="openshift-marketplace/redhat-operators-lxbmp" Nov 22 11:04:02 crc kubenswrapper[4926]: I1122 11:04:02.146039 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1c1e2328-b247-4691-b904-37fe4fbef883-catalog-content\") pod \"redhat-operators-lxbmp\" (UID: \"1c1e2328-b247-4691-b904-37fe4fbef883\") " pod="openshift-marketplace/redhat-operators-lxbmp" Nov 22 11:04:02 crc kubenswrapper[4926]: I1122 11:04:02.146088 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1c1e2328-b247-4691-b904-37fe4fbef883-utilities\") pod \"redhat-operators-lxbmp\" (UID: \"1c1e2328-b247-4691-b904-37fe4fbef883\") " pod="openshift-marketplace/redhat-operators-lxbmp" Nov 22 11:04:02 crc kubenswrapper[4926]: I1122 11:04:02.248070 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2dclj\" (UniqueName: \"kubernetes.io/projected/1c1e2328-b247-4691-b904-37fe4fbef883-kube-api-access-2dclj\") pod \"redhat-operators-lxbmp\" (UID: \"1c1e2328-b247-4691-b904-37fe4fbef883\") " pod="openshift-marketplace/redhat-operators-lxbmp" Nov 22 11:04:02 crc kubenswrapper[4926]: I1122 11:04:02.248128 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1c1e2328-b247-4691-b904-37fe4fbef883-catalog-content\") pod \"redhat-operators-lxbmp\" (UID: \"1c1e2328-b247-4691-b904-37fe4fbef883\") " pod="openshift-marketplace/redhat-operators-lxbmp" Nov 22 11:04:02 crc kubenswrapper[4926]: I1122 11:04:02.248157 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1c1e2328-b247-4691-b904-37fe4fbef883-utilities\") pod \"redhat-operators-lxbmp\" (UID: 
\"1c1e2328-b247-4691-b904-37fe4fbef883\") " pod="openshift-marketplace/redhat-operators-lxbmp" Nov 22 11:04:02 crc kubenswrapper[4926]: I1122 11:04:02.248636 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1c1e2328-b247-4691-b904-37fe4fbef883-catalog-content\") pod \"redhat-operators-lxbmp\" (UID: \"1c1e2328-b247-4691-b904-37fe4fbef883\") " pod="openshift-marketplace/redhat-operators-lxbmp" Nov 22 11:04:02 crc kubenswrapper[4926]: I1122 11:04:02.248758 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1c1e2328-b247-4691-b904-37fe4fbef883-utilities\") pod \"redhat-operators-lxbmp\" (UID: \"1c1e2328-b247-4691-b904-37fe4fbef883\") " pod="openshift-marketplace/redhat-operators-lxbmp" Nov 22 11:04:02 crc kubenswrapper[4926]: I1122 11:04:02.267318 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2dclj\" (UniqueName: \"kubernetes.io/projected/1c1e2328-b247-4691-b904-37fe4fbef883-kube-api-access-2dclj\") pod \"redhat-operators-lxbmp\" (UID: \"1c1e2328-b247-4691-b904-37fe4fbef883\") " pod="openshift-marketplace/redhat-operators-lxbmp" Nov 22 11:04:02 crc kubenswrapper[4926]: I1122 11:04:02.362498 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-lxbmp" Nov 22 11:04:02 crc kubenswrapper[4926]: I1122 11:04:02.818446 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-lxbmp"] Nov 22 11:04:03 crc kubenswrapper[4926]: I1122 11:04:03.093669 4926 generic.go:334] "Generic (PLEG): container finished" podID="1c1e2328-b247-4691-b904-37fe4fbef883" containerID="ee44f5df927e1211933849ec4980ad091ca461e3ce6de3cb6d90246a300e3a10" exitCode=0 Nov 22 11:04:03 crc kubenswrapper[4926]: I1122 11:04:03.093709 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-lxbmp" event={"ID":"1c1e2328-b247-4691-b904-37fe4fbef883","Type":"ContainerDied","Data":"ee44f5df927e1211933849ec4980ad091ca461e3ce6de3cb6d90246a300e3a10"} Nov 22 11:04:03 crc kubenswrapper[4926]: I1122 11:04:03.093736 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-lxbmp" event={"ID":"1c1e2328-b247-4691-b904-37fe4fbef883","Type":"ContainerStarted","Data":"bd7af5b68f061895cf62b1bdc3c1e1877f395a94250b78c10e9c49c9d55933fc"} Nov 22 11:04:04 crc kubenswrapper[4926]: I1122 11:04:04.105594 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-lxbmp" event={"ID":"1c1e2328-b247-4691-b904-37fe4fbef883","Type":"ContainerStarted","Data":"181a295023808420a733f2bb827f523733971cb842ed2ea870f12b334051b4da"} Nov 22 11:04:06 crc kubenswrapper[4926]: I1122 11:04:06.133984 4926 generic.go:334] "Generic (PLEG): container finished" podID="1c1e2328-b247-4691-b904-37fe4fbef883" containerID="181a295023808420a733f2bb827f523733971cb842ed2ea870f12b334051b4da" exitCode=0 Nov 22 11:04:06 crc kubenswrapper[4926]: I1122 11:04:06.134137 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-lxbmp" event={"ID":"1c1e2328-b247-4691-b904-37fe4fbef883","Type":"ContainerDied","Data":"181a295023808420a733f2bb827f523733971cb842ed2ea870f12b334051b4da"} Nov 22 11:04:08 crc kubenswrapper[4926]: I1122 11:04:08.163771 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-marketplace/redhat-operators-lxbmp" event={"ID":"1c1e2328-b247-4691-b904-37fe4fbef883","Type":"ContainerStarted","Data":"b75a276ddc35715d08c19d8bb1356cab3630a3371e8ce1c150a31dc9c46a966d"} Nov 22 11:04:08 crc kubenswrapper[4926]: I1122 11:04:08.197205 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-lxbmp" podStartSLOduration=3.222090879 podStartE2EDuration="7.197184876s" podCreationTimestamp="2025-11-22 11:04:01 +0000 UTC" firstStartedPulling="2025-11-22 11:04:03.095630529 +0000 UTC m=+1463.397235816" lastFinishedPulling="2025-11-22 11:04:07.070724526 +0000 UTC m=+1467.372329813" observedRunningTime="2025-11-22 11:04:08.186194141 +0000 UTC m=+1468.487799438" watchObservedRunningTime="2025-11-22 11:04:08.197184876 +0000 UTC m=+1468.498790183" Nov 22 11:04:12 crc kubenswrapper[4926]: I1122 11:04:12.362720 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-lxbmp" Nov 22 11:04:12 crc kubenswrapper[4926]: I1122 11:04:12.363327 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-lxbmp" Nov 22 11:04:13 crc kubenswrapper[4926]: I1122 11:04:13.420076 4926 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-lxbmp" podUID="1c1e2328-b247-4691-b904-37fe4fbef883" containerName="registry-server" probeResult="failure" output=< Nov 22 11:04:13 crc kubenswrapper[4926]: timeout: failed to connect service ":50051" within 1s Nov 22 11:04:13 crc kubenswrapper[4926]: > Nov 22 11:04:19 crc kubenswrapper[4926]: I1122 11:04:19.945713 4926 scope.go:117] "RemoveContainer" containerID="d644a2b72e6965c29f8677c53eb1fde30d0936ad857c292aeee5c0d754905a3c" Nov 22 11:04:19 crc kubenswrapper[4926]: I1122 11:04:19.975269 4926 scope.go:117] "RemoveContainer" containerID="7f95f216ed43bc6facdd0d7c8a829a47795cbf662d9dad96aa2be1e0492ca903" Nov 22 11:04:22 crc kubenswrapper[4926]: I1122 11:04:22.426207 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-lxbmp" Nov 22 11:04:22 crc kubenswrapper[4926]: I1122 11:04:22.485113 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-lxbmp" Nov 22 11:04:22 crc kubenswrapper[4926]: I1122 11:04:22.661145 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-lxbmp"] Nov 22 11:04:24 crc kubenswrapper[4926]: I1122 11:04:24.317290 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-lxbmp" podUID="1c1e2328-b247-4691-b904-37fe4fbef883" containerName="registry-server" containerID="cri-o://b75a276ddc35715d08c19d8bb1356cab3630a3371e8ce1c150a31dc9c46a966d" gracePeriod=2 Nov 22 11:04:24 crc kubenswrapper[4926]: I1122 11:04:24.818061 4926 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-lxbmp"
Nov 22 11:04:24 crc kubenswrapper[4926]: I1122 11:04:24.916138 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2dclj\" (UniqueName: \"kubernetes.io/projected/1c1e2328-b247-4691-b904-37fe4fbef883-kube-api-access-2dclj\") pod \"1c1e2328-b247-4691-b904-37fe4fbef883\" (UID: \"1c1e2328-b247-4691-b904-37fe4fbef883\") "
Nov 22 11:04:24 crc kubenswrapper[4926]: I1122 11:04:24.916290 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1c1e2328-b247-4691-b904-37fe4fbef883-utilities\") pod \"1c1e2328-b247-4691-b904-37fe4fbef883\" (UID: \"1c1e2328-b247-4691-b904-37fe4fbef883\") "
Nov 22 11:04:24 crc kubenswrapper[4926]: I1122 11:04:24.916375 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1c1e2328-b247-4691-b904-37fe4fbef883-catalog-content\") pod \"1c1e2328-b247-4691-b904-37fe4fbef883\" (UID: \"1c1e2328-b247-4691-b904-37fe4fbef883\") "
Nov 22 11:04:24 crc kubenswrapper[4926]: I1122 11:04:24.917105 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1c1e2328-b247-4691-b904-37fe4fbef883-utilities" (OuterVolumeSpecName: "utilities") pod "1c1e2328-b247-4691-b904-37fe4fbef883" (UID: "1c1e2328-b247-4691-b904-37fe4fbef883"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 22 11:04:24 crc kubenswrapper[4926]: I1122 11:04:24.923043 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1c1e2328-b247-4691-b904-37fe4fbef883-kube-api-access-2dclj" (OuterVolumeSpecName: "kube-api-access-2dclj") pod "1c1e2328-b247-4691-b904-37fe4fbef883" (UID: "1c1e2328-b247-4691-b904-37fe4fbef883"). InnerVolumeSpecName "kube-api-access-2dclj". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 22 11:04:25 crc kubenswrapper[4926]: I1122 11:04:25.007364 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1c1e2328-b247-4691-b904-37fe4fbef883-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "1c1e2328-b247-4691-b904-37fe4fbef883" (UID: "1c1e2328-b247-4691-b904-37fe4fbef883"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 22 11:04:25 crc kubenswrapper[4926]: I1122 11:04:25.018924 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2dclj\" (UniqueName: \"kubernetes.io/projected/1c1e2328-b247-4691-b904-37fe4fbef883-kube-api-access-2dclj\") on node \"crc\" DevicePath \"\""
Nov 22 11:04:25 crc kubenswrapper[4926]: I1122 11:04:25.018964 4926 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1c1e2328-b247-4691-b904-37fe4fbef883-utilities\") on node \"crc\" DevicePath \"\""
Nov 22 11:04:25 crc kubenswrapper[4926]: I1122 11:04:25.018974 4926 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1c1e2328-b247-4691-b904-37fe4fbef883-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 22 11:04:25 crc kubenswrapper[4926]: I1122 11:04:25.330907 4926 generic.go:334] "Generic (PLEG): container finished" podID="1c1e2328-b247-4691-b904-37fe4fbef883" containerID="b75a276ddc35715d08c19d8bb1356cab3630a3371e8ce1c150a31dc9c46a966d" exitCode=0
Nov 22 11:04:25 crc kubenswrapper[4926]: I1122 11:04:25.330967 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-lxbmp" event={"ID":"1c1e2328-b247-4691-b904-37fe4fbef883","Type":"ContainerDied","Data":"b75a276ddc35715d08c19d8bb1356cab3630a3371e8ce1c150a31dc9c46a966d"}
Nov 22 11:04:25 crc kubenswrapper[4926]: I1122 11:04:25.331002 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-lxbmp"
Nov 22 11:04:25 crc kubenswrapper[4926]: I1122 11:04:25.331025 4926 scope.go:117] "RemoveContainer" containerID="b75a276ddc35715d08c19d8bb1356cab3630a3371e8ce1c150a31dc9c46a966d"
Nov 22 11:04:25 crc kubenswrapper[4926]: I1122 11:04:25.331011 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-lxbmp" event={"ID":"1c1e2328-b247-4691-b904-37fe4fbef883","Type":"ContainerDied","Data":"bd7af5b68f061895cf62b1bdc3c1e1877f395a94250b78c10e9c49c9d55933fc"}
Nov 22 11:04:25 crc kubenswrapper[4926]: I1122 11:04:25.360427 4926 scope.go:117] "RemoveContainer" containerID="181a295023808420a733f2bb827f523733971cb842ed2ea870f12b334051b4da"
Nov 22 11:04:25 crc kubenswrapper[4926]: I1122 11:04:25.375740 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-lxbmp"]
Nov 22 11:04:25 crc kubenswrapper[4926]: I1122 11:04:25.391742 4926 scope.go:117] "RemoveContainer" containerID="ee44f5df927e1211933849ec4980ad091ca461e3ce6de3cb6d90246a300e3a10"
Nov 22 11:04:25 crc kubenswrapper[4926]: I1122 11:04:25.391789 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-lxbmp"]
Nov 22 11:04:25 crc kubenswrapper[4926]: I1122 11:04:25.448557 4926 scope.go:117] "RemoveContainer" containerID="b75a276ddc35715d08c19d8bb1356cab3630a3371e8ce1c150a31dc9c46a966d"
Nov 22 11:04:25 crc kubenswrapper[4926]: E1122 11:04:25.449192 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b75a276ddc35715d08c19d8bb1356cab3630a3371e8ce1c150a31dc9c46a966d\": container with ID starting with b75a276ddc35715d08c19d8bb1356cab3630a3371e8ce1c150a31dc9c46a966d not found: ID does not exist" containerID="b75a276ddc35715d08c19d8bb1356cab3630a3371e8ce1c150a31dc9c46a966d"
Nov 22 11:04:25 crc kubenswrapper[4926]: I1122 11:04:25.449310 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b75a276ddc35715d08c19d8bb1356cab3630a3371e8ce1c150a31dc9c46a966d"} err="failed to get container status \"b75a276ddc35715d08c19d8bb1356cab3630a3371e8ce1c150a31dc9c46a966d\": rpc error: code = NotFound desc = could not find container \"b75a276ddc35715d08c19d8bb1356cab3630a3371e8ce1c150a31dc9c46a966d\": container with ID starting with b75a276ddc35715d08c19d8bb1356cab3630a3371e8ce1c150a31dc9c46a966d not found: ID does not exist"
Nov 22 11:04:25 crc kubenswrapper[4926]: I1122 11:04:25.449346 4926 scope.go:117] "RemoveContainer" containerID="181a295023808420a733f2bb827f523733971cb842ed2ea870f12b334051b4da"
Nov 22 11:04:25 crc kubenswrapper[4926]: E1122 11:04:25.450130 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"181a295023808420a733f2bb827f523733971cb842ed2ea870f12b334051b4da\": container with ID starting with 181a295023808420a733f2bb827f523733971cb842ed2ea870f12b334051b4da not found: ID does not exist" containerID="181a295023808420a733f2bb827f523733971cb842ed2ea870f12b334051b4da"
Nov 22 11:04:25 crc kubenswrapper[4926]: I1122 11:04:25.450173 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"181a295023808420a733f2bb827f523733971cb842ed2ea870f12b334051b4da"} err="failed to get container status \"181a295023808420a733f2bb827f523733971cb842ed2ea870f12b334051b4da\": rpc error: code = NotFound desc = could not find container \"181a295023808420a733f2bb827f523733971cb842ed2ea870f12b334051b4da\": container with ID starting with 181a295023808420a733f2bb827f523733971cb842ed2ea870f12b334051b4da not found: ID does not exist"
Nov 22 11:04:25 crc kubenswrapper[4926]: I1122 11:04:25.450198 4926 scope.go:117] "RemoveContainer" containerID="ee44f5df927e1211933849ec4980ad091ca461e3ce6de3cb6d90246a300e3a10"
Nov 22 11:04:25 crc kubenswrapper[4926]: E1122 11:04:25.451368 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ee44f5df927e1211933849ec4980ad091ca461e3ce6de3cb6d90246a300e3a10\": container with ID starting with ee44f5df927e1211933849ec4980ad091ca461e3ce6de3cb6d90246a300e3a10 not found: ID does not exist" containerID="ee44f5df927e1211933849ec4980ad091ca461e3ce6de3cb6d90246a300e3a10"
Nov 22 11:04:25 crc kubenswrapper[4926]: I1122 11:04:25.451405 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ee44f5df927e1211933849ec4980ad091ca461e3ce6de3cb6d90246a300e3a10"} err="failed to get container status \"ee44f5df927e1211933849ec4980ad091ca461e3ce6de3cb6d90246a300e3a10\": rpc error: code = NotFound desc = could not find container \"ee44f5df927e1211933849ec4980ad091ca461e3ce6de3cb6d90246a300e3a10\": container with ID starting with ee44f5df927e1211933849ec4980ad091ca461e3ce6de3cb6d90246a300e3a10 not found: ID does not exist"
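
Note: the three "ContainerStatus from runtime service failed ... NotFound" errors above appear benign. The kubelet re-queries CRI-O for containers it has just removed; NotFound at that point means the desired end state (container gone) is already reached, so cleanup continues. A minimal Go sketch of that idempotent-delete pattern (hypothetical names, not kubelet's actual code):

    // Sketch only: mirrors the pattern in the log, where a NotFound from the
    // runtime during removal is treated as "already deleted", not a failure.
    package main

    import (
    	"errors"
    	"fmt"
    )

    // errNotFound stands in for the gRPC codes.NotFound the runtime returns.
    var errNotFound = errors.New("rpc error: code = NotFound")

    // runtimeService is a hypothetical, trimmed-down stand-in for a CRI client.
    type runtimeService interface {
    	RemoveContainer(id string) error
    }

    // removeContainerIdempotent deletes a container and swallows NotFound:
    // if someone else already removed it, the desired state is reached.
    func removeContainerIdempotent(rs runtimeService, id string) error {
    	if err := rs.RemoveContainer(id); err != nil {
    		if errors.Is(err, errNotFound) {
    			fmt.Printf("container %s already gone, nothing to do\n", id)
    			return nil
    		}
    		return fmt.Errorf("failed to remove container %s: %w", id, err)
    	}
    	return nil
    }

    type fakeRuntime struct{}

    func (fakeRuntime) RemoveContainer(id string) error { return errNotFound }

    func main() {
    	// Like the b75a276d... container above: gone by the time the kubelet
    	// asks again, so the error is logged and cleanup proceeds anyway.
    	if err := removeContainerIdempotent(fakeRuntime{}, "b75a276d"); err != nil {
    		fmt.Println("unexpected:", err)
    	}
    }
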
podUID="1c1e2328-b247-4691-b904-37fe4fbef883" containerName="extract-utilities" Nov 22 11:04:30 crc kubenswrapper[4926]: I1122 11:04:30.444700 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="1c1e2328-b247-4691-b904-37fe4fbef883" containerName="extract-utilities" Nov 22 11:04:30 crc kubenswrapper[4926]: E1122 11:04:30.444713 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1c1e2328-b247-4691-b904-37fe4fbef883" containerName="registry-server" Nov 22 11:04:30 crc kubenswrapper[4926]: I1122 11:04:30.444722 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="1c1e2328-b247-4691-b904-37fe4fbef883" containerName="registry-server" Nov 22 11:04:30 crc kubenswrapper[4926]: E1122 11:04:30.444743 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1c1e2328-b247-4691-b904-37fe4fbef883" containerName="extract-content" Nov 22 11:04:30 crc kubenswrapper[4926]: I1122 11:04:30.444756 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="1c1e2328-b247-4691-b904-37fe4fbef883" containerName="extract-content" Nov 22 11:04:30 crc kubenswrapper[4926]: I1122 11:04:30.445060 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="1c1e2328-b247-4691-b904-37fe4fbef883" containerName="registry-server" Nov 22 11:04:30 crc kubenswrapper[4926]: I1122 11:04:30.446704 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-hmcxt" Nov 22 11:04:30 crc kubenswrapper[4926]: I1122 11:04:30.477249 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-hmcxt"] Nov 22 11:04:30 crc kubenswrapper[4926]: I1122 11:04:30.515911 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/05484d09-4ff5-4eaa-9579-7475fef24010-catalog-content\") pod \"certified-operators-hmcxt\" (UID: \"05484d09-4ff5-4eaa-9579-7475fef24010\") " pod="openshift-marketplace/certified-operators-hmcxt" Nov 22 11:04:30 crc kubenswrapper[4926]: I1122 11:04:30.516033 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-r52g7\" (UniqueName: \"kubernetes.io/projected/05484d09-4ff5-4eaa-9579-7475fef24010-kube-api-access-r52g7\") pod \"certified-operators-hmcxt\" (UID: \"05484d09-4ff5-4eaa-9579-7475fef24010\") " pod="openshift-marketplace/certified-operators-hmcxt" Nov 22 11:04:30 crc kubenswrapper[4926]: I1122 11:04:30.516087 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/05484d09-4ff5-4eaa-9579-7475fef24010-utilities\") pod \"certified-operators-hmcxt\" (UID: \"05484d09-4ff5-4eaa-9579-7475fef24010\") " pod="openshift-marketplace/certified-operators-hmcxt" Nov 22 11:04:30 crc kubenswrapper[4926]: I1122 11:04:30.617565 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/05484d09-4ff5-4eaa-9579-7475fef24010-catalog-content\") pod \"certified-operators-hmcxt\" (UID: \"05484d09-4ff5-4eaa-9579-7475fef24010\") " pod="openshift-marketplace/certified-operators-hmcxt" Nov 22 11:04:30 crc kubenswrapper[4926]: I1122 11:04:30.617637 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-r52g7\" (UniqueName: \"kubernetes.io/projected/05484d09-4ff5-4eaa-9579-7475fef24010-kube-api-access-r52g7\") 
pod \"certified-operators-hmcxt\" (UID: \"05484d09-4ff5-4eaa-9579-7475fef24010\") " pod="openshift-marketplace/certified-operators-hmcxt" Nov 22 11:04:30 crc kubenswrapper[4926]: I1122 11:04:30.617700 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/05484d09-4ff5-4eaa-9579-7475fef24010-utilities\") pod \"certified-operators-hmcxt\" (UID: \"05484d09-4ff5-4eaa-9579-7475fef24010\") " pod="openshift-marketplace/certified-operators-hmcxt" Nov 22 11:04:30 crc kubenswrapper[4926]: I1122 11:04:30.618279 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/05484d09-4ff5-4eaa-9579-7475fef24010-catalog-content\") pod \"certified-operators-hmcxt\" (UID: \"05484d09-4ff5-4eaa-9579-7475fef24010\") " pod="openshift-marketplace/certified-operators-hmcxt" Nov 22 11:04:30 crc kubenswrapper[4926]: I1122 11:04:30.618308 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/05484d09-4ff5-4eaa-9579-7475fef24010-utilities\") pod \"certified-operators-hmcxt\" (UID: \"05484d09-4ff5-4eaa-9579-7475fef24010\") " pod="openshift-marketplace/certified-operators-hmcxt" Nov 22 11:04:30 crc kubenswrapper[4926]: I1122 11:04:30.639502 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-r52g7\" (UniqueName: \"kubernetes.io/projected/05484d09-4ff5-4eaa-9579-7475fef24010-kube-api-access-r52g7\") pod \"certified-operators-hmcxt\" (UID: \"05484d09-4ff5-4eaa-9579-7475fef24010\") " pod="openshift-marketplace/certified-operators-hmcxt" Nov 22 11:04:30 crc kubenswrapper[4926]: I1122 11:04:30.787477 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-hmcxt" Nov 22 11:04:31 crc kubenswrapper[4926]: I1122 11:04:31.292803 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-hmcxt"] Nov 22 11:04:31 crc kubenswrapper[4926]: I1122 11:04:31.388926 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-hmcxt" event={"ID":"05484d09-4ff5-4eaa-9579-7475fef24010","Type":"ContainerStarted","Data":"21b0439c55750a3307dffd6e514bb5790200fa56e00a015b2e6099e86524c964"} Nov 22 11:04:32 crc kubenswrapper[4926]: I1122 11:04:32.403990 4926 generic.go:334] "Generic (PLEG): container finished" podID="05484d09-4ff5-4eaa-9579-7475fef24010" containerID="fdef2ebbbef59b48079b0b3febe06f2b5923b1abd5e0504bcfc76c1a0b2a6871" exitCode=0 Nov 22 11:04:32 crc kubenswrapper[4926]: I1122 11:04:32.404054 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-hmcxt" event={"ID":"05484d09-4ff5-4eaa-9579-7475fef24010","Type":"ContainerDied","Data":"fdef2ebbbef59b48079b0b3febe06f2b5923b1abd5e0504bcfc76c1a0b2a6871"} Nov 22 11:04:33 crc kubenswrapper[4926]: I1122 11:04:33.414178 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-hmcxt" event={"ID":"05484d09-4ff5-4eaa-9579-7475fef24010","Type":"ContainerStarted","Data":"8a4c60a63ff5c1b0e45f1d5ea7f37f572f13f3b96e1560d3467e7dc8efefd718"} Nov 22 11:04:34 crc kubenswrapper[4926]: I1122 11:04:34.424461 4926 generic.go:334] "Generic (PLEG): container finished" podID="05484d09-4ff5-4eaa-9579-7475fef24010" containerID="8a4c60a63ff5c1b0e45f1d5ea7f37f572f13f3b96e1560d3467e7dc8efefd718" exitCode=0 Nov 22 11:04:34 crc kubenswrapper[4926]: I1122 11:04:34.424508 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-hmcxt" event={"ID":"05484d09-4ff5-4eaa-9579-7475fef24010","Type":"ContainerDied","Data":"8a4c60a63ff5c1b0e45f1d5ea7f37f572f13f3b96e1560d3467e7dc8efefd718"} Nov 22 11:04:35 crc kubenswrapper[4926]: I1122 11:04:35.437519 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-hmcxt" event={"ID":"05484d09-4ff5-4eaa-9579-7475fef24010","Type":"ContainerStarted","Data":"41d9f5065f446ee412eb2bd479eef374e41238f1d879aa2400d9434c2c5eae16"} Nov 22 11:04:35 crc kubenswrapper[4926]: I1122 11:04:35.484699 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-hmcxt" podStartSLOduration=2.839956158 podStartE2EDuration="5.484677326s" podCreationTimestamp="2025-11-22 11:04:30 +0000 UTC" firstStartedPulling="2025-11-22 11:04:32.406777203 +0000 UTC m=+1492.708382490" lastFinishedPulling="2025-11-22 11:04:35.051498361 +0000 UTC m=+1495.353103658" observedRunningTime="2025-11-22 11:04:35.458388933 +0000 UTC m=+1495.759994240" watchObservedRunningTime="2025-11-22 11:04:35.484677326 +0000 UTC m=+1495.786282603" Nov 22 11:04:41 crc kubenswrapper[4926]: I1122 11:04:41.529817 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-hmcxt" Nov 22 11:04:41 crc kubenswrapper[4926]: I1122 11:04:41.530239 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-hmcxt" Nov 22 11:04:41 crc kubenswrapper[4926]: I1122 11:04:41.585406 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" 
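
Note: the startup-latency entry above is internally consistent: podStartE2EDuration is watchObservedRunningTime minus podCreationTimestamp, and podStartSLOduration additionally excludes the image-pull window (lastFinishedPulling minus firstStartedPulling). A worked check using the monotonic m=+... offsets copied from the entry (a sketch of the arithmetic, not the tracker's code):

    // Sketch: reproduces the numbers in the log entry above.
    package main

    import "fmt"

    func main() {
    	// Monotonic offsets (the m=+... values) from the log entry, in seconds.
    	firstStartedPulling := 1492.708382490
    	lastFinishedPulling := 1495.353103658
    	e2e := 5.484677326 // podStartE2EDuration

    	pull := lastFinishedPulling - firstStartedPulling // image pull window
    	slo := e2e - pull                                 // SLO duration excludes the pull

    	fmt.Printf("image pull took %.9fs\n", pull)      // 2.644721168s
    	fmt.Printf("podStartSLOduration = %.9fs\n", slo) // 2.839956158s, matching the log
    }
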
pod="openshift-marketplace/certified-operators-hmcxt" Nov 22 11:04:42 crc kubenswrapper[4926]: I1122 11:04:42.570181 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-hmcxt" Nov 22 11:04:42 crc kubenswrapper[4926]: I1122 11:04:42.620236 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-hmcxt"] Nov 22 11:04:44 crc kubenswrapper[4926]: I1122 11:04:44.544557 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-hmcxt" podUID="05484d09-4ff5-4eaa-9579-7475fef24010" containerName="registry-server" containerID="cri-o://41d9f5065f446ee412eb2bd479eef374e41238f1d879aa2400d9434c2c5eae16" gracePeriod=2 Nov 22 11:04:45 crc kubenswrapper[4926]: I1122 11:04:45.021344 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-hmcxt" Nov 22 11:04:45 crc kubenswrapper[4926]: I1122 11:04:45.108263 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-r52g7\" (UniqueName: \"kubernetes.io/projected/05484d09-4ff5-4eaa-9579-7475fef24010-kube-api-access-r52g7\") pod \"05484d09-4ff5-4eaa-9579-7475fef24010\" (UID: \"05484d09-4ff5-4eaa-9579-7475fef24010\") " Nov 22 11:04:45 crc kubenswrapper[4926]: I1122 11:04:45.108319 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/05484d09-4ff5-4eaa-9579-7475fef24010-utilities\") pod \"05484d09-4ff5-4eaa-9579-7475fef24010\" (UID: \"05484d09-4ff5-4eaa-9579-7475fef24010\") " Nov 22 11:04:45 crc kubenswrapper[4926]: I1122 11:04:45.108371 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/05484d09-4ff5-4eaa-9579-7475fef24010-catalog-content\") pod \"05484d09-4ff5-4eaa-9579-7475fef24010\" (UID: \"05484d09-4ff5-4eaa-9579-7475fef24010\") " Nov 22 11:04:45 crc kubenswrapper[4926]: I1122 11:04:45.109390 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/05484d09-4ff5-4eaa-9579-7475fef24010-utilities" (OuterVolumeSpecName: "utilities") pod "05484d09-4ff5-4eaa-9579-7475fef24010" (UID: "05484d09-4ff5-4eaa-9579-7475fef24010"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 11:04:45 crc kubenswrapper[4926]: I1122 11:04:45.114722 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/05484d09-4ff5-4eaa-9579-7475fef24010-kube-api-access-r52g7" (OuterVolumeSpecName: "kube-api-access-r52g7") pod "05484d09-4ff5-4eaa-9579-7475fef24010" (UID: "05484d09-4ff5-4eaa-9579-7475fef24010"). InnerVolumeSpecName "kube-api-access-r52g7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 11:04:45 crc kubenswrapper[4926]: I1122 11:04:45.164670 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/05484d09-4ff5-4eaa-9579-7475fef24010-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "05484d09-4ff5-4eaa-9579-7475fef24010" (UID: "05484d09-4ff5-4eaa-9579-7475fef24010"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 11:04:45 crc kubenswrapper[4926]: I1122 11:04:45.210332 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-r52g7\" (UniqueName: \"kubernetes.io/projected/05484d09-4ff5-4eaa-9579-7475fef24010-kube-api-access-r52g7\") on node \"crc\" DevicePath \"\"" Nov 22 11:04:45 crc kubenswrapper[4926]: I1122 11:04:45.210377 4926 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/05484d09-4ff5-4eaa-9579-7475fef24010-utilities\") on node \"crc\" DevicePath \"\"" Nov 22 11:04:45 crc kubenswrapper[4926]: I1122 11:04:45.210391 4926 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/05484d09-4ff5-4eaa-9579-7475fef24010-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 22 11:04:45 crc kubenswrapper[4926]: I1122 11:04:45.553785 4926 generic.go:334] "Generic (PLEG): container finished" podID="05484d09-4ff5-4eaa-9579-7475fef24010" containerID="41d9f5065f446ee412eb2bd479eef374e41238f1d879aa2400d9434c2c5eae16" exitCode=0 Nov 22 11:04:45 crc kubenswrapper[4926]: I1122 11:04:45.553904 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-hmcxt" Nov 22 11:04:45 crc kubenswrapper[4926]: I1122 11:04:45.553864 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-hmcxt" event={"ID":"05484d09-4ff5-4eaa-9579-7475fef24010","Type":"ContainerDied","Data":"41d9f5065f446ee412eb2bd479eef374e41238f1d879aa2400d9434c2c5eae16"} Nov 22 11:04:45 crc kubenswrapper[4926]: I1122 11:04:45.555130 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-hmcxt" event={"ID":"05484d09-4ff5-4eaa-9579-7475fef24010","Type":"ContainerDied","Data":"21b0439c55750a3307dffd6e514bb5790200fa56e00a015b2e6099e86524c964"} Nov 22 11:04:45 crc kubenswrapper[4926]: I1122 11:04:45.555167 4926 scope.go:117] "RemoveContainer" containerID="41d9f5065f446ee412eb2bd479eef374e41238f1d879aa2400d9434c2c5eae16" Nov 22 11:04:45 crc kubenswrapper[4926]: I1122 11:04:45.582306 4926 scope.go:117] "RemoveContainer" containerID="8a4c60a63ff5c1b0e45f1d5ea7f37f572f13f3b96e1560d3467e7dc8efefd718" Nov 22 11:04:45 crc kubenswrapper[4926]: I1122 11:04:45.617832 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-hmcxt"] Nov 22 11:04:45 crc kubenswrapper[4926]: I1122 11:04:45.625868 4926 scope.go:117] "RemoveContainer" containerID="fdef2ebbbef59b48079b0b3febe06f2b5923b1abd5e0504bcfc76c1a0b2a6871" Nov 22 11:04:45 crc kubenswrapper[4926]: I1122 11:04:45.626134 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-hmcxt"] Nov 22 11:04:45 crc kubenswrapper[4926]: I1122 11:04:45.665642 4926 scope.go:117] "RemoveContainer" containerID="41d9f5065f446ee412eb2bd479eef374e41238f1d879aa2400d9434c2c5eae16" Nov 22 11:04:45 crc kubenswrapper[4926]: E1122 11:04:45.666033 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"41d9f5065f446ee412eb2bd479eef374e41238f1d879aa2400d9434c2c5eae16\": container with ID starting with 41d9f5065f446ee412eb2bd479eef374e41238f1d879aa2400d9434c2c5eae16 not found: ID does not exist" containerID="41d9f5065f446ee412eb2bd479eef374e41238f1d879aa2400d9434c2c5eae16" Nov 22 11:04:45 crc kubenswrapper[4926]: I1122 11:04:45.666069 
4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"41d9f5065f446ee412eb2bd479eef374e41238f1d879aa2400d9434c2c5eae16"} err="failed to get container status \"41d9f5065f446ee412eb2bd479eef374e41238f1d879aa2400d9434c2c5eae16\": rpc error: code = NotFound desc = could not find container \"41d9f5065f446ee412eb2bd479eef374e41238f1d879aa2400d9434c2c5eae16\": container with ID starting with 41d9f5065f446ee412eb2bd479eef374e41238f1d879aa2400d9434c2c5eae16 not found: ID does not exist" Nov 22 11:04:45 crc kubenswrapper[4926]: I1122 11:04:45.666095 4926 scope.go:117] "RemoveContainer" containerID="8a4c60a63ff5c1b0e45f1d5ea7f37f572f13f3b96e1560d3467e7dc8efefd718" Nov 22 11:04:45 crc kubenswrapper[4926]: E1122 11:04:45.666660 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8a4c60a63ff5c1b0e45f1d5ea7f37f572f13f3b96e1560d3467e7dc8efefd718\": container with ID starting with 8a4c60a63ff5c1b0e45f1d5ea7f37f572f13f3b96e1560d3467e7dc8efefd718 not found: ID does not exist" containerID="8a4c60a63ff5c1b0e45f1d5ea7f37f572f13f3b96e1560d3467e7dc8efefd718" Nov 22 11:04:45 crc kubenswrapper[4926]: I1122 11:04:45.666689 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8a4c60a63ff5c1b0e45f1d5ea7f37f572f13f3b96e1560d3467e7dc8efefd718"} err="failed to get container status \"8a4c60a63ff5c1b0e45f1d5ea7f37f572f13f3b96e1560d3467e7dc8efefd718\": rpc error: code = NotFound desc = could not find container \"8a4c60a63ff5c1b0e45f1d5ea7f37f572f13f3b96e1560d3467e7dc8efefd718\": container with ID starting with 8a4c60a63ff5c1b0e45f1d5ea7f37f572f13f3b96e1560d3467e7dc8efefd718 not found: ID does not exist" Nov 22 11:04:45 crc kubenswrapper[4926]: I1122 11:04:45.666711 4926 scope.go:117] "RemoveContainer" containerID="fdef2ebbbef59b48079b0b3febe06f2b5923b1abd5e0504bcfc76c1a0b2a6871" Nov 22 11:04:45 crc kubenswrapper[4926]: E1122 11:04:45.666928 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"fdef2ebbbef59b48079b0b3febe06f2b5923b1abd5e0504bcfc76c1a0b2a6871\": container with ID starting with fdef2ebbbef59b48079b0b3febe06f2b5923b1abd5e0504bcfc76c1a0b2a6871 not found: ID does not exist" containerID="fdef2ebbbef59b48079b0b3febe06f2b5923b1abd5e0504bcfc76c1a0b2a6871" Nov 22 11:04:45 crc kubenswrapper[4926]: I1122 11:04:45.666948 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fdef2ebbbef59b48079b0b3febe06f2b5923b1abd5e0504bcfc76c1a0b2a6871"} err="failed to get container status \"fdef2ebbbef59b48079b0b3febe06f2b5923b1abd5e0504bcfc76c1a0b2a6871\": rpc error: code = NotFound desc = could not find container \"fdef2ebbbef59b48079b0b3febe06f2b5923b1abd5e0504bcfc76c1a0b2a6871\": container with ID starting with fdef2ebbbef59b48079b0b3febe06f2b5923b1abd5e0504bcfc76c1a0b2a6871 not found: ID does not exist" Nov 22 11:04:46 crc kubenswrapper[4926]: I1122 11:04:46.591313 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="05484d09-4ff5-4eaa-9579-7475fef24010" path="/var/lib/kubelet/pods/05484d09-4ff5-4eaa-9579-7475fef24010/volumes" Nov 22 11:04:47 crc kubenswrapper[4926]: I1122 11:04:47.462666 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-5z8p8"] Nov 22 11:04:47 crc kubenswrapper[4926]: E1122 11:04:47.463446 4926 cpu_manager.go:410] "RemoveStaleState: removing container" 
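
Note: "Cleaned up orphaned pod volumes dir" follows only after every volume under /var/lib/kubelet/pods/<podUID>/volumes has been torn down, as in the TearDown and "Volume detached" entries above. A hedged sketch of that guard (the emptiness check is an assumption for illustration, not kubelet_volumes.go):

    // Sketch: remove a deleted pod's volumes dir only once teardown left it empty.
    package main

    import (
    	"fmt"
    	"os"
    	"path/filepath"
    )

    // cleanupOrphanedPodDir removes a pod's volumes dir only if no volume
    // directories remain inside it; a non-empty dir means teardown is unfinished.
    func cleanupOrphanedPodDir(kubeletRoot, podUID string) error {
    	volumesDir := filepath.Join(kubeletRoot, "pods", podUID, "volumes")
    	entries, err := os.ReadDir(volumesDir)
    	if err != nil {
    		return err
    	}
    	for _, e := range entries {
    		sub, err := os.ReadDir(filepath.Join(volumesDir, e.Name()))
    		if err != nil {
    			return err
    		}
    		if len(sub) > 0 {
    			return fmt.Errorf("volumes still present under %s", e.Name())
    		}
    	}
    	return os.RemoveAll(volumesDir)
    }

    func main() {
    	err := cleanupOrphanedPodDir("/var/lib/kubelet", "05484d09-4ff5-4eaa-9579-7475fef24010")
    	fmt.Println("cleanup:", err)
    }
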
podUID="05484d09-4ff5-4eaa-9579-7475fef24010" containerName="extract-utilities" Nov 22 11:04:47 crc kubenswrapper[4926]: I1122 11:04:47.463470 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="05484d09-4ff5-4eaa-9579-7475fef24010" containerName="extract-utilities" Nov 22 11:04:47 crc kubenswrapper[4926]: E1122 11:04:47.463490 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="05484d09-4ff5-4eaa-9579-7475fef24010" containerName="registry-server" Nov 22 11:04:47 crc kubenswrapper[4926]: I1122 11:04:47.463499 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="05484d09-4ff5-4eaa-9579-7475fef24010" containerName="registry-server" Nov 22 11:04:47 crc kubenswrapper[4926]: E1122 11:04:47.463526 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="05484d09-4ff5-4eaa-9579-7475fef24010" containerName="extract-content" Nov 22 11:04:47 crc kubenswrapper[4926]: I1122 11:04:47.463534 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="05484d09-4ff5-4eaa-9579-7475fef24010" containerName="extract-content" Nov 22 11:04:47 crc kubenswrapper[4926]: I1122 11:04:47.463754 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="05484d09-4ff5-4eaa-9579-7475fef24010" containerName="registry-server" Nov 22 11:04:47 crc kubenswrapper[4926]: I1122 11:04:47.465716 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-5z8p8" Nov 22 11:04:47 crc kubenswrapper[4926]: I1122 11:04:47.488084 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-5z8p8"] Nov 22 11:04:47 crc kubenswrapper[4926]: I1122 11:04:47.553102 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-64pv6\" (UniqueName: \"kubernetes.io/projected/9e291be7-1e76-4c86-ba72-9f959a67b2fc-kube-api-access-64pv6\") pod \"community-operators-5z8p8\" (UID: \"9e291be7-1e76-4c86-ba72-9f959a67b2fc\") " pod="openshift-marketplace/community-operators-5z8p8" Nov 22 11:04:47 crc kubenswrapper[4926]: I1122 11:04:47.553564 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9e291be7-1e76-4c86-ba72-9f959a67b2fc-utilities\") pod \"community-operators-5z8p8\" (UID: \"9e291be7-1e76-4c86-ba72-9f959a67b2fc\") " pod="openshift-marketplace/community-operators-5z8p8" Nov 22 11:04:47 crc kubenswrapper[4926]: I1122 11:04:47.553634 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9e291be7-1e76-4c86-ba72-9f959a67b2fc-catalog-content\") pod \"community-operators-5z8p8\" (UID: \"9e291be7-1e76-4c86-ba72-9f959a67b2fc\") " pod="openshift-marketplace/community-operators-5z8p8" Nov 22 11:04:47 crc kubenswrapper[4926]: I1122 11:04:47.655795 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-64pv6\" (UniqueName: \"kubernetes.io/projected/9e291be7-1e76-4c86-ba72-9f959a67b2fc-kube-api-access-64pv6\") pod \"community-operators-5z8p8\" (UID: \"9e291be7-1e76-4c86-ba72-9f959a67b2fc\") " pod="openshift-marketplace/community-operators-5z8p8" Nov 22 11:04:47 crc kubenswrapper[4926]: I1122 11:04:47.656350 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9e291be7-1e76-4c86-ba72-9f959a67b2fc-utilities\") pod 
\"community-operators-5z8p8\" (UID: \"9e291be7-1e76-4c86-ba72-9f959a67b2fc\") " pod="openshift-marketplace/community-operators-5z8p8" Nov 22 11:04:47 crc kubenswrapper[4926]: I1122 11:04:47.656407 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9e291be7-1e76-4c86-ba72-9f959a67b2fc-catalog-content\") pod \"community-operators-5z8p8\" (UID: \"9e291be7-1e76-4c86-ba72-9f959a67b2fc\") " pod="openshift-marketplace/community-operators-5z8p8" Nov 22 11:04:47 crc kubenswrapper[4926]: I1122 11:04:47.656857 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9e291be7-1e76-4c86-ba72-9f959a67b2fc-catalog-content\") pod \"community-operators-5z8p8\" (UID: \"9e291be7-1e76-4c86-ba72-9f959a67b2fc\") " pod="openshift-marketplace/community-operators-5z8p8" Nov 22 11:04:47 crc kubenswrapper[4926]: I1122 11:04:47.657333 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9e291be7-1e76-4c86-ba72-9f959a67b2fc-utilities\") pod \"community-operators-5z8p8\" (UID: \"9e291be7-1e76-4c86-ba72-9f959a67b2fc\") " pod="openshift-marketplace/community-operators-5z8p8" Nov 22 11:04:47 crc kubenswrapper[4926]: I1122 11:04:47.680766 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-64pv6\" (UniqueName: \"kubernetes.io/projected/9e291be7-1e76-4c86-ba72-9f959a67b2fc-kube-api-access-64pv6\") pod \"community-operators-5z8p8\" (UID: \"9e291be7-1e76-4c86-ba72-9f959a67b2fc\") " pod="openshift-marketplace/community-operators-5z8p8" Nov 22 11:04:47 crc kubenswrapper[4926]: I1122 11:04:47.802338 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-5z8p8" Nov 22 11:04:48 crc kubenswrapper[4926]: I1122 11:04:48.325758 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-5z8p8"] Nov 22 11:04:48 crc kubenswrapper[4926]: I1122 11:04:48.585837 4926 generic.go:334] "Generic (PLEG): container finished" podID="9e291be7-1e76-4c86-ba72-9f959a67b2fc" containerID="88ef750c246cf8007f5fd6ddceb83a2d4df6e15bcf22405f77abe3de6c65bb43" exitCode=0 Nov 22 11:04:48 crc kubenswrapper[4926]: I1122 11:04:48.592366 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-5z8p8" event={"ID":"9e291be7-1e76-4c86-ba72-9f959a67b2fc","Type":"ContainerDied","Data":"88ef750c246cf8007f5fd6ddceb83a2d4df6e15bcf22405f77abe3de6c65bb43"} Nov 22 11:04:48 crc kubenswrapper[4926]: I1122 11:04:48.592405 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-5z8p8" event={"ID":"9e291be7-1e76-4c86-ba72-9f959a67b2fc","Type":"ContainerStarted","Data":"c702bccc22e6734491693b620f77887671b13e279499090abbeb68d01ebcbd94"} Nov 22 11:04:49 crc kubenswrapper[4926]: I1122 11:04:49.595878 4926 generic.go:334] "Generic (PLEG): container finished" podID="9189297a-e5e2-47b3-9cf0-ac932c80f3bb" containerID="c346fc1a007af843c6ad3b0a4624e4d310e0d47de32ca65bcd3e65227715a2e6" exitCode=0 Nov 22 11:04:49 crc kubenswrapper[4926]: I1122 11:04:49.596076 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-ch87n" event={"ID":"9189297a-e5e2-47b3-9cf0-ac932c80f3bb","Type":"ContainerDied","Data":"c346fc1a007af843c6ad3b0a4624e4d310e0d47de32ca65bcd3e65227715a2e6"} Nov 22 11:04:50 crc kubenswrapper[4926]: I1122 11:04:50.607314 4926 generic.go:334] "Generic (PLEG): container finished" podID="9e291be7-1e76-4c86-ba72-9f959a67b2fc" containerID="c427ecb1014c86c74b5c0494c803ef51d4017a38a848304665007d8282cc7591" exitCode=0 Nov 22 11:04:50 crc kubenswrapper[4926]: I1122 11:04:50.607427 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-5z8p8" event={"ID":"9e291be7-1e76-4c86-ba72-9f959a67b2fc","Type":"ContainerDied","Data":"c427ecb1014c86c74b5c0494c803ef51d4017a38a848304665007d8282cc7591"} Nov 22 11:04:51 crc kubenswrapper[4926]: I1122 11:04:51.010837 4926 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-ch87n" Nov 22 11:04:51 crc kubenswrapper[4926]: I1122 11:04:51.121539 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8lbzl\" (UniqueName: \"kubernetes.io/projected/9189297a-e5e2-47b3-9cf0-ac932c80f3bb-kube-api-access-8lbzl\") pod \"9189297a-e5e2-47b3-9cf0-ac932c80f3bb\" (UID: \"9189297a-e5e2-47b3-9cf0-ac932c80f3bb\") " Nov 22 11:04:51 crc kubenswrapper[4926]: I1122 11:04:51.121729 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/9189297a-e5e2-47b3-9cf0-ac932c80f3bb-inventory\") pod \"9189297a-e5e2-47b3-9cf0-ac932c80f3bb\" (UID: \"9189297a-e5e2-47b3-9cf0-ac932c80f3bb\") " Nov 22 11:04:51 crc kubenswrapper[4926]: I1122 11:04:51.121915 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9189297a-e5e2-47b3-9cf0-ac932c80f3bb-bootstrap-combined-ca-bundle\") pod \"9189297a-e5e2-47b3-9cf0-ac932c80f3bb\" (UID: \"9189297a-e5e2-47b3-9cf0-ac932c80f3bb\") " Nov 22 11:04:51 crc kubenswrapper[4926]: I1122 11:04:51.121951 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/9189297a-e5e2-47b3-9cf0-ac932c80f3bb-ssh-key\") pod \"9189297a-e5e2-47b3-9cf0-ac932c80f3bb\" (UID: \"9189297a-e5e2-47b3-9cf0-ac932c80f3bb\") " Nov 22 11:04:51 crc kubenswrapper[4926]: I1122 11:04:51.126950 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9189297a-e5e2-47b3-9cf0-ac932c80f3bb-bootstrap-combined-ca-bundle" (OuterVolumeSpecName: "bootstrap-combined-ca-bundle") pod "9189297a-e5e2-47b3-9cf0-ac932c80f3bb" (UID: "9189297a-e5e2-47b3-9cf0-ac932c80f3bb"). InnerVolumeSpecName "bootstrap-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 11:04:51 crc kubenswrapper[4926]: I1122 11:04:51.127345 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9189297a-e5e2-47b3-9cf0-ac932c80f3bb-kube-api-access-8lbzl" (OuterVolumeSpecName: "kube-api-access-8lbzl") pod "9189297a-e5e2-47b3-9cf0-ac932c80f3bb" (UID: "9189297a-e5e2-47b3-9cf0-ac932c80f3bb"). InnerVolumeSpecName "kube-api-access-8lbzl". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 11:04:51 crc kubenswrapper[4926]: I1122 11:04:51.152694 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9189297a-e5e2-47b3-9cf0-ac932c80f3bb-inventory" (OuterVolumeSpecName: "inventory") pod "9189297a-e5e2-47b3-9cf0-ac932c80f3bb" (UID: "9189297a-e5e2-47b3-9cf0-ac932c80f3bb"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 11:04:51 crc kubenswrapper[4926]: I1122 11:04:51.152951 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9189297a-e5e2-47b3-9cf0-ac932c80f3bb-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "9189297a-e5e2-47b3-9cf0-ac932c80f3bb" (UID: "9189297a-e5e2-47b3-9cf0-ac932c80f3bb"). InnerVolumeSpecName "ssh-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 11:04:51 crc kubenswrapper[4926]: I1122 11:04:51.224112 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8lbzl\" (UniqueName: \"kubernetes.io/projected/9189297a-e5e2-47b3-9cf0-ac932c80f3bb-kube-api-access-8lbzl\") on node \"crc\" DevicePath \"\"" Nov 22 11:04:51 crc kubenswrapper[4926]: I1122 11:04:51.224147 4926 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/9189297a-e5e2-47b3-9cf0-ac932c80f3bb-inventory\") on node \"crc\" DevicePath \"\"" Nov 22 11:04:51 crc kubenswrapper[4926]: I1122 11:04:51.224158 4926 reconciler_common.go:293] "Volume detached for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9189297a-e5e2-47b3-9cf0-ac932c80f3bb-bootstrap-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 22 11:04:51 crc kubenswrapper[4926]: I1122 11:04:51.224166 4926 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/9189297a-e5e2-47b3-9cf0-ac932c80f3bb-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 22 11:04:51 crc kubenswrapper[4926]: I1122 11:04:51.620677 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-ch87n" event={"ID":"9189297a-e5e2-47b3-9cf0-ac932c80f3bb","Type":"ContainerDied","Data":"d9f259c4332afd785189d0a786ecd9db31ebc1718206238c1df945976127a451"} Nov 22 11:04:51 crc kubenswrapper[4926]: I1122 11:04:51.621825 4926 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="d9f259c4332afd785189d0a786ecd9db31ebc1718206238c1df945976127a451" Nov 22 11:04:51 crc kubenswrapper[4926]: I1122 11:04:51.620745 4926 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-ch87n" Nov 22 11:04:51 crc kubenswrapper[4926]: I1122 11:04:51.623196 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-5z8p8" event={"ID":"9e291be7-1e76-4c86-ba72-9f959a67b2fc","Type":"ContainerStarted","Data":"a5b5189de4990c5dbfbdc5b326f69fa1779026b934e2a4d8d3fdfb8acdfc9cd2"} Nov 22 11:04:51 crc kubenswrapper[4926]: I1122 11:04:51.668139 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-5z8p8" podStartSLOduration=2.254331827 podStartE2EDuration="4.668114851s" podCreationTimestamp="2025-11-22 11:04:47 +0000 UTC" firstStartedPulling="2025-11-22 11:04:48.592206595 +0000 UTC m=+1508.893811882" lastFinishedPulling="2025-11-22 11:04:51.005989619 +0000 UTC m=+1511.307594906" observedRunningTime="2025-11-22 11:04:51.654053319 +0000 UTC m=+1511.955658636" watchObservedRunningTime="2025-11-22 11:04:51.668114851 +0000 UTC m=+1511.969720138" Nov 22 11:04:51 crc kubenswrapper[4926]: I1122 11:04:51.715453 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/download-cache-edpm-deployment-openstack-edpm-ipam-7v496"] Nov 22 11:04:51 crc kubenswrapper[4926]: E1122 11:04:51.715911 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9189297a-e5e2-47b3-9cf0-ac932c80f3bb" containerName="bootstrap-edpm-deployment-openstack-edpm-ipam" Nov 22 11:04:51 crc kubenswrapper[4926]: I1122 11:04:51.715930 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="9189297a-e5e2-47b3-9cf0-ac932c80f3bb" containerName="bootstrap-edpm-deployment-openstack-edpm-ipam" Nov 22 11:04:51 crc kubenswrapper[4926]: I1122 11:04:51.726326 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="9189297a-e5e2-47b3-9cf0-ac932c80f3bb" containerName="bootstrap-edpm-deployment-openstack-edpm-ipam" Nov 22 11:04:51 crc kubenswrapper[4926]: I1122 11:04:51.727330 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-7v496" Nov 22 11:04:51 crc kubenswrapper[4926]: I1122 11:04:51.730405 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/download-cache-edpm-deployment-openstack-edpm-ipam-7v496"] Nov 22 11:04:51 crc kubenswrapper[4926]: I1122 11:04:51.732312 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 22 11:04:51 crc kubenswrapper[4926]: I1122 11:04:51.732356 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 22 11:04:51 crc kubenswrapper[4926]: I1122 11:04:51.732752 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 22 11:04:51 crc kubenswrapper[4926]: I1122 11:04:51.733271 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-lscnm" Nov 22 11:04:51 crc kubenswrapper[4926]: I1122 11:04:51.837327 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/fae080f1-2e5d-463a-ae8e-0c29025a62a3-inventory\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-7v496\" (UID: \"fae080f1-2e5d-463a-ae8e-0c29025a62a3\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-7v496" Nov 22 11:04:51 crc kubenswrapper[4926]: I1122 11:04:51.837621 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/fae080f1-2e5d-463a-ae8e-0c29025a62a3-ssh-key\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-7v496\" (UID: \"fae080f1-2e5d-463a-ae8e-0c29025a62a3\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-7v496" Nov 22 11:04:51 crc kubenswrapper[4926]: I1122 11:04:51.837784 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q4884\" (UniqueName: \"kubernetes.io/projected/fae080f1-2e5d-463a-ae8e-0c29025a62a3-kube-api-access-q4884\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-7v496\" (UID: \"fae080f1-2e5d-463a-ae8e-0c29025a62a3\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-7v496" Nov 22 11:04:51 crc kubenswrapper[4926]: I1122 11:04:51.939627 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/fae080f1-2e5d-463a-ae8e-0c29025a62a3-inventory\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-7v496\" (UID: \"fae080f1-2e5d-463a-ae8e-0c29025a62a3\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-7v496" Nov 22 11:04:51 crc kubenswrapper[4926]: I1122 11:04:51.940612 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/fae080f1-2e5d-463a-ae8e-0c29025a62a3-ssh-key\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-7v496\" (UID: \"fae080f1-2e5d-463a-ae8e-0c29025a62a3\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-7v496" Nov 22 11:04:51 crc kubenswrapper[4926]: I1122 11:04:51.940737 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q4884\" (UniqueName: \"kubernetes.io/projected/fae080f1-2e5d-463a-ae8e-0c29025a62a3-kube-api-access-q4884\") pod 
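
Note: the VerifyControllerAttachedVolume/MountVolume entries in this stretch are the kubelet volume manager reconciling desired state against actual state: anything desired but not yet mounted gets a MountVolume operation, and anything mounted but no longer desired gets an UnmountVolume, as seen elsewhere in this log. A compact sketch of that loop (illustrative only, not operationExecutor itself):

    // Sketch: desired-vs-actual reconciliation as in the mount/unmount entries.
    package main

    import "fmt"

    func reconcile(desired, actual map[string]bool) {
    	for vol := range desired {
    		if !actual[vol] {
    			fmt.Printf("operationExecutor.MountVolume started for volume %q\n", vol)
    			actual[vol] = true // stands in for MountVolume.SetUp succeeding
    		}
    	}
    	for vol := range actual {
    		if !desired[vol] {
    			fmt.Printf("operationExecutor.UnmountVolume started for volume %q\n", vol)
    			delete(actual, vol)
    		}
    	}
    }

    func main() {
    	desired := map[string]bool{"inventory": true, "ssh-key": true, "kube-api-access-q4884": true}
    	actual := map[string]bool{}
    	reconcile(desired, actual) // mounts all three, as in the log above
    }
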
\"download-cache-edpm-deployment-openstack-edpm-ipam-7v496\" (UID: \"fae080f1-2e5d-463a-ae8e-0c29025a62a3\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-7v496" Nov 22 11:04:51 crc kubenswrapper[4926]: I1122 11:04:51.946040 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/fae080f1-2e5d-463a-ae8e-0c29025a62a3-inventory\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-7v496\" (UID: \"fae080f1-2e5d-463a-ae8e-0c29025a62a3\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-7v496" Nov 22 11:04:51 crc kubenswrapper[4926]: I1122 11:04:51.946040 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/fae080f1-2e5d-463a-ae8e-0c29025a62a3-ssh-key\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-7v496\" (UID: \"fae080f1-2e5d-463a-ae8e-0c29025a62a3\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-7v496" Nov 22 11:04:51 crc kubenswrapper[4926]: I1122 11:04:51.964650 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-q4884\" (UniqueName: \"kubernetes.io/projected/fae080f1-2e5d-463a-ae8e-0c29025a62a3-kube-api-access-q4884\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-7v496\" (UID: \"fae080f1-2e5d-463a-ae8e-0c29025a62a3\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-7v496" Nov 22 11:04:52 crc kubenswrapper[4926]: I1122 11:04:52.051580 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-7v496" Nov 22 11:04:52 crc kubenswrapper[4926]: I1122 11:04:52.555114 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/download-cache-edpm-deployment-openstack-edpm-ipam-7v496"] Nov 22 11:04:52 crc kubenswrapper[4926]: I1122 11:04:52.631488 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-7v496" event={"ID":"fae080f1-2e5d-463a-ae8e-0c29025a62a3","Type":"ContainerStarted","Data":"2d3d0bdd80531c2b67b677ccd997ed020356134e9f583fef5704db24877b3931"} Nov 22 11:04:53 crc kubenswrapper[4926]: I1122 11:04:53.641089 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-7v496" event={"ID":"fae080f1-2e5d-463a-ae8e-0c29025a62a3","Type":"ContainerStarted","Data":"182936f0e13c5baaefa5d2e3fce9e2b3221c0e35c92139ffa6df7e05ead4b219"} Nov 22 11:04:53 crc kubenswrapper[4926]: I1122 11:04:53.663816 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-7v496" podStartSLOduration=2.286625101 podStartE2EDuration="2.663798352s" podCreationTimestamp="2025-11-22 11:04:51 +0000 UTC" firstStartedPulling="2025-11-22 11:04:52.560659581 +0000 UTC m=+1512.862264868" lastFinishedPulling="2025-11-22 11:04:52.937832832 +0000 UTC m=+1513.239438119" observedRunningTime="2025-11-22 11:04:53.655066072 +0000 UTC m=+1513.956671359" watchObservedRunningTime="2025-11-22 11:04:53.663798352 +0000 UTC m=+1513.965403639" Nov 22 11:04:57 crc kubenswrapper[4926]: I1122 11:04:57.803917 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-5z8p8" Nov 22 11:04:57 crc kubenswrapper[4926]: I1122 11:04:57.804471 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" 
pod="openshift-marketplace/community-operators-5z8p8" Nov 22 11:04:57 crc kubenswrapper[4926]: I1122 11:04:57.855466 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-5z8p8" Nov 22 11:04:58 crc kubenswrapper[4926]: I1122 11:04:58.763923 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-5z8p8" Nov 22 11:04:58 crc kubenswrapper[4926]: I1122 11:04:58.815067 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-5z8p8"] Nov 22 11:05:00 crc kubenswrapper[4926]: I1122 11:05:00.719348 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-5z8p8" podUID="9e291be7-1e76-4c86-ba72-9f959a67b2fc" containerName="registry-server" containerID="cri-o://a5b5189de4990c5dbfbdc5b326f69fa1779026b934e2a4d8d3fdfb8acdfc9cd2" gracePeriod=2 Nov 22 11:05:01 crc kubenswrapper[4926]: I1122 11:05:01.169730 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-5z8p8" Nov 22 11:05:01 crc kubenswrapper[4926]: I1122 11:05:01.221238 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9e291be7-1e76-4c86-ba72-9f959a67b2fc-utilities\") pod \"9e291be7-1e76-4c86-ba72-9f959a67b2fc\" (UID: \"9e291be7-1e76-4c86-ba72-9f959a67b2fc\") " Nov 22 11:05:01 crc kubenswrapper[4926]: I1122 11:05:01.221459 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-64pv6\" (UniqueName: \"kubernetes.io/projected/9e291be7-1e76-4c86-ba72-9f959a67b2fc-kube-api-access-64pv6\") pod \"9e291be7-1e76-4c86-ba72-9f959a67b2fc\" (UID: \"9e291be7-1e76-4c86-ba72-9f959a67b2fc\") " Nov 22 11:05:01 crc kubenswrapper[4926]: I1122 11:05:01.221563 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9e291be7-1e76-4c86-ba72-9f959a67b2fc-catalog-content\") pod \"9e291be7-1e76-4c86-ba72-9f959a67b2fc\" (UID: \"9e291be7-1e76-4c86-ba72-9f959a67b2fc\") " Nov 22 11:05:01 crc kubenswrapper[4926]: I1122 11:05:01.223045 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9e291be7-1e76-4c86-ba72-9f959a67b2fc-utilities" (OuterVolumeSpecName: "utilities") pod "9e291be7-1e76-4c86-ba72-9f959a67b2fc" (UID: "9e291be7-1e76-4c86-ba72-9f959a67b2fc"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 11:05:01 crc kubenswrapper[4926]: I1122 11:05:01.233648 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9e291be7-1e76-4c86-ba72-9f959a67b2fc-kube-api-access-64pv6" (OuterVolumeSpecName: "kube-api-access-64pv6") pod "9e291be7-1e76-4c86-ba72-9f959a67b2fc" (UID: "9e291be7-1e76-4c86-ba72-9f959a67b2fc"). InnerVolumeSpecName "kube-api-access-64pv6". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 11:05:01 crc kubenswrapper[4926]: I1122 11:05:01.324129 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-64pv6\" (UniqueName: \"kubernetes.io/projected/9e291be7-1e76-4c86-ba72-9f959a67b2fc-kube-api-access-64pv6\") on node \"crc\" DevicePath \"\"" Nov 22 11:05:01 crc kubenswrapper[4926]: I1122 11:05:01.324161 4926 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9e291be7-1e76-4c86-ba72-9f959a67b2fc-utilities\") on node \"crc\" DevicePath \"\"" Nov 22 11:05:01 crc kubenswrapper[4926]: I1122 11:05:01.335354 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9e291be7-1e76-4c86-ba72-9f959a67b2fc-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "9e291be7-1e76-4c86-ba72-9f959a67b2fc" (UID: "9e291be7-1e76-4c86-ba72-9f959a67b2fc"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 11:05:01 crc kubenswrapper[4926]: I1122 11:05:01.425470 4926 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9e291be7-1e76-4c86-ba72-9f959a67b2fc-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 22 11:05:01 crc kubenswrapper[4926]: I1122 11:05:01.731776 4926 generic.go:334] "Generic (PLEG): container finished" podID="9e291be7-1e76-4c86-ba72-9f959a67b2fc" containerID="a5b5189de4990c5dbfbdc5b326f69fa1779026b934e2a4d8d3fdfb8acdfc9cd2" exitCode=0 Nov 22 11:05:01 crc kubenswrapper[4926]: I1122 11:05:01.731827 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-5z8p8" Nov 22 11:05:01 crc kubenswrapper[4926]: I1122 11:05:01.731835 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-5z8p8" event={"ID":"9e291be7-1e76-4c86-ba72-9f959a67b2fc","Type":"ContainerDied","Data":"a5b5189de4990c5dbfbdc5b326f69fa1779026b934e2a4d8d3fdfb8acdfc9cd2"} Nov 22 11:05:01 crc kubenswrapper[4926]: I1122 11:05:01.731951 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-5z8p8" event={"ID":"9e291be7-1e76-4c86-ba72-9f959a67b2fc","Type":"ContainerDied","Data":"c702bccc22e6734491693b620f77887671b13e279499090abbeb68d01ebcbd94"} Nov 22 11:05:01 crc kubenswrapper[4926]: I1122 11:05:01.732007 4926 scope.go:117] "RemoveContainer" containerID="a5b5189de4990c5dbfbdc5b326f69fa1779026b934e2a4d8d3fdfb8acdfc9cd2" Nov 22 11:05:01 crc kubenswrapper[4926]: I1122 11:05:01.771237 4926 scope.go:117] "RemoveContainer" containerID="c427ecb1014c86c74b5c0494c803ef51d4017a38a848304665007d8282cc7591" Nov 22 11:05:01 crc kubenswrapper[4926]: I1122 11:05:01.780296 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-5z8p8"] Nov 22 11:05:01 crc kubenswrapper[4926]: I1122 11:05:01.790806 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-5z8p8"] Nov 22 11:05:01 crc kubenswrapper[4926]: I1122 11:05:01.807669 4926 scope.go:117] "RemoveContainer" containerID="88ef750c246cf8007f5fd6ddceb83a2d4df6e15bcf22405f77abe3de6c65bb43" Nov 22 11:05:01 crc kubenswrapper[4926]: I1122 11:05:01.833723 4926 scope.go:117] "RemoveContainer" containerID="a5b5189de4990c5dbfbdc5b326f69fa1779026b934e2a4d8d3fdfb8acdfc9cd2" Nov 22 11:05:01 crc kubenswrapper[4926]: E1122 11:05:01.834067 4926 log.go:32] 
"ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a5b5189de4990c5dbfbdc5b326f69fa1779026b934e2a4d8d3fdfb8acdfc9cd2\": container with ID starting with a5b5189de4990c5dbfbdc5b326f69fa1779026b934e2a4d8d3fdfb8acdfc9cd2 not found: ID does not exist" containerID="a5b5189de4990c5dbfbdc5b326f69fa1779026b934e2a4d8d3fdfb8acdfc9cd2" Nov 22 11:05:01 crc kubenswrapper[4926]: I1122 11:05:01.834115 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a5b5189de4990c5dbfbdc5b326f69fa1779026b934e2a4d8d3fdfb8acdfc9cd2"} err="failed to get container status \"a5b5189de4990c5dbfbdc5b326f69fa1779026b934e2a4d8d3fdfb8acdfc9cd2\": rpc error: code = NotFound desc = could not find container \"a5b5189de4990c5dbfbdc5b326f69fa1779026b934e2a4d8d3fdfb8acdfc9cd2\": container with ID starting with a5b5189de4990c5dbfbdc5b326f69fa1779026b934e2a4d8d3fdfb8acdfc9cd2 not found: ID does not exist" Nov 22 11:05:01 crc kubenswrapper[4926]: I1122 11:05:01.834146 4926 scope.go:117] "RemoveContainer" containerID="c427ecb1014c86c74b5c0494c803ef51d4017a38a848304665007d8282cc7591" Nov 22 11:05:01 crc kubenswrapper[4926]: E1122 11:05:01.834494 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c427ecb1014c86c74b5c0494c803ef51d4017a38a848304665007d8282cc7591\": container with ID starting with c427ecb1014c86c74b5c0494c803ef51d4017a38a848304665007d8282cc7591 not found: ID does not exist" containerID="c427ecb1014c86c74b5c0494c803ef51d4017a38a848304665007d8282cc7591" Nov 22 11:05:01 crc kubenswrapper[4926]: I1122 11:05:01.834526 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c427ecb1014c86c74b5c0494c803ef51d4017a38a848304665007d8282cc7591"} err="failed to get container status \"c427ecb1014c86c74b5c0494c803ef51d4017a38a848304665007d8282cc7591\": rpc error: code = NotFound desc = could not find container \"c427ecb1014c86c74b5c0494c803ef51d4017a38a848304665007d8282cc7591\": container with ID starting with c427ecb1014c86c74b5c0494c803ef51d4017a38a848304665007d8282cc7591 not found: ID does not exist" Nov 22 11:05:01 crc kubenswrapper[4926]: I1122 11:05:01.834559 4926 scope.go:117] "RemoveContainer" containerID="88ef750c246cf8007f5fd6ddceb83a2d4df6e15bcf22405f77abe3de6c65bb43" Nov 22 11:05:01 crc kubenswrapper[4926]: E1122 11:05:01.835088 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"88ef750c246cf8007f5fd6ddceb83a2d4df6e15bcf22405f77abe3de6c65bb43\": container with ID starting with 88ef750c246cf8007f5fd6ddceb83a2d4df6e15bcf22405f77abe3de6c65bb43 not found: ID does not exist" containerID="88ef750c246cf8007f5fd6ddceb83a2d4df6e15bcf22405f77abe3de6c65bb43" Nov 22 11:05:01 crc kubenswrapper[4926]: I1122 11:05:01.835110 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"88ef750c246cf8007f5fd6ddceb83a2d4df6e15bcf22405f77abe3de6c65bb43"} err="failed to get container status \"88ef750c246cf8007f5fd6ddceb83a2d4df6e15bcf22405f77abe3de6c65bb43\": rpc error: code = NotFound desc = could not find container \"88ef750c246cf8007f5fd6ddceb83a2d4df6e15bcf22405f77abe3de6c65bb43\": container with ID starting with 88ef750c246cf8007f5fd6ddceb83a2d4df6e15bcf22405f77abe3de6c65bb43 not found: ID does not exist" Nov 22 11:05:02 crc kubenswrapper[4926]: I1122 11:05:02.597380 4926 kubelet_volumes.go:163] "Cleaned 
Nov 22 11:05:02 crc kubenswrapper[4926]: I1122 11:05:02.597380 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9e291be7-1e76-4c86-ba72-9f959a67b2fc" path="/var/lib/kubelet/pods/9e291be7-1e76-4c86-ba72-9f959a67b2fc/volumes"
Nov 22 11:05:20 crc kubenswrapper[4926]: I1122 11:05:20.040070 4926 scope.go:117] "RemoveContainer" containerID="777af81e7a14c41a8cb6c96828eba45f9d709c04037bca5f9fe9a335250e7712"
Nov 22 11:05:20 crc kubenswrapper[4926]: I1122 11:05:20.064516 4926 scope.go:117] "RemoveContainer" containerID="6b87219a2657302c8f7393f9f99a82078ce754798e7fafadb126d2f9454f7919"
Nov 22 11:05:39 crc kubenswrapper[4926]: I1122 11:05:39.661443 4926 patch_prober.go:28] interesting pod/machine-config-daemon-xr9nd container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 22 11:05:39 crc kubenswrapper[4926]: I1122 11:05:39.663642 4926 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" podUID="d4977b14-85c3-4141-9b15-1768f09e8d27" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 22 11:06:03 crc kubenswrapper[4926]: I1122 11:06:03.043509 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-db-create-r4wxx"]
Nov 22 11:06:03 crc kubenswrapper[4926]: I1122 11:06:03.051637 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-db-create-r4wxx"]
Nov 22 11:06:04 crc kubenswrapper[4926]: I1122 11:06:04.041432 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-db-create-6r9dw"]
Nov 22 11:06:04 crc kubenswrapper[4926]: I1122 11:06:04.054756 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-db-create-6mrs7"]
Nov 22 11:06:04 crc kubenswrapper[4926]: I1122 11:06:04.064045 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-db-create-6mrs7"]
Nov 22 11:06:04 crc kubenswrapper[4926]: I1122 11:06:04.072100 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-db-create-6r9dw"]
Nov 22 11:06:04 crc kubenswrapper[4926]: I1122 11:06:04.599040 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="345b6795-6ff9-4d04-9128-4123b30d27da" path="/var/lib/kubelet/pods/345b6795-6ff9-4d04-9128-4123b30d27da/volumes"
Nov 22 11:06:04 crc kubenswrapper[4926]: I1122 11:06:04.600108 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a3ff5805-0887-4e05-98ca-d88590cbe337" path="/var/lib/kubelet/pods/a3ff5805-0887-4e05-98ca-d88590cbe337/volumes"
Nov 22 11:06:04 crc kubenswrapper[4926]: I1122 11:06:04.600874 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cf5bb2c6-b5be-4094-afe5-380401435ebd" path="/var/lib/kubelet/pods/cf5bb2c6-b5be-4094-afe5-380401435ebd/volumes"
Nov 22 11:06:08 crc kubenswrapper[4926]: I1122 11:06:08.038304 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-9385-account-create-update-w2grf"]
Nov 22 11:06:08 crc kubenswrapper[4926]: I1122 11:06:08.048357 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-0a97-account-create-update-sx5t4"]
Nov 22 11:06:08 crc kubenswrapper[4926]: I1122 11:06:08.068636 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-8467-account-create-update-psl9r"]
Nov 22 11:06:08 crc kubenswrapper[4926]: I1122 11:06:08.082059 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-9385-account-create-update-w2grf"]
Nov 22 11:06:08 crc kubenswrapper[4926]: I1122 11:06:08.089673 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-8467-account-create-update-psl9r"]
Nov 22 11:06:08 crc kubenswrapper[4926]: I1122 11:06:08.098987 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-0a97-account-create-update-sx5t4"]
Nov 22 11:06:08 crc kubenswrapper[4926]: I1122 11:06:08.593608 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0d5250a1-2aea-43b4-899a-d714d1dbf3ef" path="/var/lib/kubelet/pods/0d5250a1-2aea-43b4-899a-d714d1dbf3ef/volumes"
Nov 22 11:06:08 crc kubenswrapper[4926]: I1122 11:06:08.595238 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5c28a203-3548-47cc-891f-050d9a3fd7c4" path="/var/lib/kubelet/pods/5c28a203-3548-47cc-891f-050d9a3fd7c4/volumes"
Nov 22 11:06:08 crc kubenswrapper[4926]: I1122 11:06:08.596087 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8db0114b-7b33-4e75-88ba-cb05b049fa7d" path="/var/lib/kubelet/pods/8db0114b-7b33-4e75-88ba-cb05b049fa7d/volumes"
Nov 22 11:06:09 crc kubenswrapper[4926]: I1122 11:06:09.660722 4926 patch_prober.go:28] interesting pod/machine-config-daemon-xr9nd container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 22 11:06:09 crc kubenswrapper[4926]: I1122 11:06:09.661051 4926 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" podUID="d4977b14-85c3-4141-9b15-1768f09e8d27" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
event={"ID":"fae080f1-2e5d-463a-ae8e-0c29025a62a3","Type":"ContainerDied","Data":"182936f0e13c5baaefa5d2e3fce9e2b3221c0e35c92139ffa6df7e05ead4b219"} Nov 22 11:06:25 crc kubenswrapper[4926]: I1122 11:06:25.947443 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-7v496" Nov 22 11:06:26 crc kubenswrapper[4926]: I1122 11:06:26.086956 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/fae080f1-2e5d-463a-ae8e-0c29025a62a3-ssh-key\") pod \"fae080f1-2e5d-463a-ae8e-0c29025a62a3\" (UID: \"fae080f1-2e5d-463a-ae8e-0c29025a62a3\") " Nov 22 11:06:26 crc kubenswrapper[4926]: I1122 11:06:26.087019 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-q4884\" (UniqueName: \"kubernetes.io/projected/fae080f1-2e5d-463a-ae8e-0c29025a62a3-kube-api-access-q4884\") pod \"fae080f1-2e5d-463a-ae8e-0c29025a62a3\" (UID: \"fae080f1-2e5d-463a-ae8e-0c29025a62a3\") " Nov 22 11:06:26 crc kubenswrapper[4926]: I1122 11:06:26.087100 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/fae080f1-2e5d-463a-ae8e-0c29025a62a3-inventory\") pod \"fae080f1-2e5d-463a-ae8e-0c29025a62a3\" (UID: \"fae080f1-2e5d-463a-ae8e-0c29025a62a3\") " Nov 22 11:06:26 crc kubenswrapper[4926]: I1122 11:06:26.091912 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fae080f1-2e5d-463a-ae8e-0c29025a62a3-kube-api-access-q4884" (OuterVolumeSpecName: "kube-api-access-q4884") pod "fae080f1-2e5d-463a-ae8e-0c29025a62a3" (UID: "fae080f1-2e5d-463a-ae8e-0c29025a62a3"). InnerVolumeSpecName "kube-api-access-q4884". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 11:06:26 crc kubenswrapper[4926]: I1122 11:06:26.114050 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fae080f1-2e5d-463a-ae8e-0c29025a62a3-inventory" (OuterVolumeSpecName: "inventory") pod "fae080f1-2e5d-463a-ae8e-0c29025a62a3" (UID: "fae080f1-2e5d-463a-ae8e-0c29025a62a3"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 11:06:26 crc kubenswrapper[4926]: I1122 11:06:26.114493 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fae080f1-2e5d-463a-ae8e-0c29025a62a3-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "fae080f1-2e5d-463a-ae8e-0c29025a62a3" (UID: "fae080f1-2e5d-463a-ae8e-0c29025a62a3"). InnerVolumeSpecName "ssh-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 11:06:26 crc kubenswrapper[4926]: I1122 11:06:26.189597 4926 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/fae080f1-2e5d-463a-ae8e-0c29025a62a3-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 22 11:06:26 crc kubenswrapper[4926]: I1122 11:06:26.189636 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-q4884\" (UniqueName: \"kubernetes.io/projected/fae080f1-2e5d-463a-ae8e-0c29025a62a3-kube-api-access-q4884\") on node \"crc\" DevicePath \"\"" Nov 22 11:06:26 crc kubenswrapper[4926]: I1122 11:06:26.189652 4926 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/fae080f1-2e5d-463a-ae8e-0c29025a62a3-inventory\") on node \"crc\" DevicePath \"\"" Nov 22 11:06:26 crc kubenswrapper[4926]: I1122 11:06:26.571704 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-7v496" event={"ID":"fae080f1-2e5d-463a-ae8e-0c29025a62a3","Type":"ContainerDied","Data":"2d3d0bdd80531c2b67b677ccd997ed020356134e9f583fef5704db24877b3931"} Nov 22 11:06:26 crc kubenswrapper[4926]: I1122 11:06:26.572036 4926 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="2d3d0bdd80531c2b67b677ccd997ed020356134e9f583fef5704db24877b3931" Nov 22 11:06:26 crc kubenswrapper[4926]: I1122 11:06:26.571749 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-7v496" Nov 22 11:06:26 crc kubenswrapper[4926]: I1122 11:06:26.648261 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/configure-network-edpm-deployment-openstack-edpm-ipam-76jbg"] Nov 22 11:06:26 crc kubenswrapper[4926]: E1122 11:06:26.648604 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fae080f1-2e5d-463a-ae8e-0c29025a62a3" containerName="download-cache-edpm-deployment-openstack-edpm-ipam" Nov 22 11:06:26 crc kubenswrapper[4926]: I1122 11:06:26.648621 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="fae080f1-2e5d-463a-ae8e-0c29025a62a3" containerName="download-cache-edpm-deployment-openstack-edpm-ipam" Nov 22 11:06:26 crc kubenswrapper[4926]: E1122 11:06:26.648636 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9e291be7-1e76-4c86-ba72-9f959a67b2fc" containerName="registry-server" Nov 22 11:06:26 crc kubenswrapper[4926]: I1122 11:06:26.648643 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="9e291be7-1e76-4c86-ba72-9f959a67b2fc" containerName="registry-server" Nov 22 11:06:26 crc kubenswrapper[4926]: E1122 11:06:26.648662 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9e291be7-1e76-4c86-ba72-9f959a67b2fc" containerName="extract-utilities" Nov 22 11:06:26 crc kubenswrapper[4926]: I1122 11:06:26.648668 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="9e291be7-1e76-4c86-ba72-9f959a67b2fc" containerName="extract-utilities" Nov 22 11:06:26 crc kubenswrapper[4926]: E1122 11:06:26.648706 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9e291be7-1e76-4c86-ba72-9f959a67b2fc" containerName="extract-content" Nov 22 11:06:26 crc kubenswrapper[4926]: I1122 11:06:26.648712 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="9e291be7-1e76-4c86-ba72-9f959a67b2fc" containerName="extract-content" Nov 22 11:06:26 crc kubenswrapper[4926]: I1122 11:06:26.648871 4926 
memory_manager.go:354] "RemoveStaleState removing state" podUID="fae080f1-2e5d-463a-ae8e-0c29025a62a3" containerName="download-cache-edpm-deployment-openstack-edpm-ipam" Nov 22 11:06:26 crc kubenswrapper[4926]: I1122 11:06:26.648904 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="9e291be7-1e76-4c86-ba72-9f959a67b2fc" containerName="registry-server" Nov 22 11:06:26 crc kubenswrapper[4926]: I1122 11:06:26.649524 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-76jbg" Nov 22 11:06:26 crc kubenswrapper[4926]: I1122 11:06:26.661001 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 22 11:06:26 crc kubenswrapper[4926]: I1122 11:06:26.661064 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 22 11:06:26 crc kubenswrapper[4926]: I1122 11:06:26.662308 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-network-edpm-deployment-openstack-edpm-ipam-76jbg"] Nov 22 11:06:26 crc kubenswrapper[4926]: I1122 11:06:26.663853 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-lscnm" Nov 22 11:06:26 crc kubenswrapper[4926]: I1122 11:06:26.664187 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 22 11:06:26 crc kubenswrapper[4926]: I1122 11:06:26.805985 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ab012855-82a0-4f87-97a7-e3c2d1490dda-inventory\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-76jbg\" (UID: \"ab012855-82a0-4f87-97a7-e3c2d1490dda\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-76jbg" Nov 22 11:06:26 crc kubenswrapper[4926]: I1122 11:06:26.806166 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/ab012855-82a0-4f87-97a7-e3c2d1490dda-ssh-key\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-76jbg\" (UID: \"ab012855-82a0-4f87-97a7-e3c2d1490dda\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-76jbg" Nov 22 11:06:26 crc kubenswrapper[4926]: I1122 11:06:26.806463 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-h7ght\" (UniqueName: \"kubernetes.io/projected/ab012855-82a0-4f87-97a7-e3c2d1490dda-kube-api-access-h7ght\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-76jbg\" (UID: \"ab012855-82a0-4f87-97a7-e3c2d1490dda\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-76jbg" Nov 22 11:06:26 crc kubenswrapper[4926]: I1122 11:06:26.908105 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-h7ght\" (UniqueName: \"kubernetes.io/projected/ab012855-82a0-4f87-97a7-e3c2d1490dda-kube-api-access-h7ght\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-76jbg\" (UID: \"ab012855-82a0-4f87-97a7-e3c2d1490dda\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-76jbg" Nov 22 11:06:26 crc kubenswrapper[4926]: I1122 11:06:26.908269 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: 
\"kubernetes.io/secret/ab012855-82a0-4f87-97a7-e3c2d1490dda-inventory\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-76jbg\" (UID: \"ab012855-82a0-4f87-97a7-e3c2d1490dda\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-76jbg" Nov 22 11:06:26 crc kubenswrapper[4926]: I1122 11:06:26.908370 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/ab012855-82a0-4f87-97a7-e3c2d1490dda-ssh-key\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-76jbg\" (UID: \"ab012855-82a0-4f87-97a7-e3c2d1490dda\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-76jbg" Nov 22 11:06:26 crc kubenswrapper[4926]: I1122 11:06:26.912204 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ab012855-82a0-4f87-97a7-e3c2d1490dda-inventory\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-76jbg\" (UID: \"ab012855-82a0-4f87-97a7-e3c2d1490dda\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-76jbg" Nov 22 11:06:26 crc kubenswrapper[4926]: I1122 11:06:26.912670 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/ab012855-82a0-4f87-97a7-e3c2d1490dda-ssh-key\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-76jbg\" (UID: \"ab012855-82a0-4f87-97a7-e3c2d1490dda\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-76jbg" Nov 22 11:06:26 crc kubenswrapper[4926]: I1122 11:06:26.935035 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-h7ght\" (UniqueName: \"kubernetes.io/projected/ab012855-82a0-4f87-97a7-e3c2d1490dda-kube-api-access-h7ght\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-76jbg\" (UID: \"ab012855-82a0-4f87-97a7-e3c2d1490dda\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-76jbg" Nov 22 11:06:26 crc kubenswrapper[4926]: I1122 11:06:26.978608 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-76jbg" Nov 22 11:06:27 crc kubenswrapper[4926]: I1122 11:06:27.477937 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-network-edpm-deployment-openstack-edpm-ipam-76jbg"] Nov 22 11:06:27 crc kubenswrapper[4926]: W1122 11:06:27.481836 4926 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podab012855_82a0_4f87_97a7_e3c2d1490dda.slice/crio-4acb716a27d78009783a57e53064bdd8c06bcbdeb98410409dbe1a25832f3f12 WatchSource:0}: Error finding container 4acb716a27d78009783a57e53064bdd8c06bcbdeb98410409dbe1a25832f3f12: Status 404 returned error can't find the container with id 4acb716a27d78009783a57e53064bdd8c06bcbdeb98410409dbe1a25832f3f12 Nov 22 11:06:27 crc kubenswrapper[4926]: I1122 11:06:27.484411 4926 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 22 11:06:27 crc kubenswrapper[4926]: I1122 11:06:27.579656 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-76jbg" event={"ID":"ab012855-82a0-4f87-97a7-e3c2d1490dda","Type":"ContainerStarted","Data":"4acb716a27d78009783a57e53064bdd8c06bcbdeb98410409dbe1a25832f3f12"} Nov 22 11:06:28 crc kubenswrapper[4926]: I1122 11:06:28.591459 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-76jbg" event={"ID":"ab012855-82a0-4f87-97a7-e3c2d1490dda","Type":"ContainerStarted","Data":"1565257d6216c7fa410364e81f3a039a8375519ba50152b35e329d38bdb6de1a"} Nov 22 11:06:28 crc kubenswrapper[4926]: I1122 11:06:28.611038 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-76jbg" podStartSLOduration=2.149759667 podStartE2EDuration="2.610997754s" podCreationTimestamp="2025-11-22 11:06:26 +0000 UTC" firstStartedPulling="2025-11-22 11:06:27.484109468 +0000 UTC m=+1607.785714775" lastFinishedPulling="2025-11-22 11:06:27.945347575 +0000 UTC m=+1608.246952862" observedRunningTime="2025-11-22 11:06:28.606773793 +0000 UTC m=+1608.908379090" watchObservedRunningTime="2025-11-22 11:06:28.610997754 +0000 UTC m=+1608.912603051" Nov 22 11:06:31 crc kubenswrapper[4926]: I1122 11:06:31.042643 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-db-sync-tgj2k"] Nov 22 11:06:31 crc kubenswrapper[4926]: I1122 11:06:31.053509 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-db-sync-tgj2k"] Nov 22 11:06:32 crc kubenswrapper[4926]: I1122 11:06:32.593536 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a8055b7b-5391-4ded-a3a7-ca9e86b43ec6" path="/var/lib/kubelet/pods/a8055b7b-5391-4ded-a3a7-ca9e86b43ec6/volumes" Nov 22 11:06:39 crc kubenswrapper[4926]: I1122 11:06:39.660915 4926 patch_prober.go:28] interesting pod/machine-config-daemon-xr9nd container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 22 11:06:39 crc kubenswrapper[4926]: I1122 11:06:39.661544 4926 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" podUID="d4977b14-85c3-4141-9b15-1768f09e8d27" containerName="machine-config-daemon" 
probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 22 11:06:39 crc kubenswrapper[4926]: I1122 11:06:39.661600 4926 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" Nov 22 11:06:39 crc kubenswrapper[4926]: I1122 11:06:39.662460 4926 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"e38f3f8166b5990b08ef0202b8570c03dbfd957e5057988fdcd93fcb0d15ee5f"} pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 22 11:06:39 crc kubenswrapper[4926]: I1122 11:06:39.662533 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" podUID="d4977b14-85c3-4141-9b15-1768f09e8d27" containerName="machine-config-daemon" containerID="cri-o://e38f3f8166b5990b08ef0202b8570c03dbfd957e5057988fdcd93fcb0d15ee5f" gracePeriod=600 Nov 22 11:06:39 crc kubenswrapper[4926]: E1122 11:06:39.786054 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xr9nd_openshift-machine-config-operator(d4977b14-85c3-4141-9b15-1768f09e8d27)\"" pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" podUID="d4977b14-85c3-4141-9b15-1768f09e8d27" Nov 22 11:06:40 crc kubenswrapper[4926]: I1122 11:06:40.712624 4926 generic.go:334] "Generic (PLEG): container finished" podID="d4977b14-85c3-4141-9b15-1768f09e8d27" containerID="e38f3f8166b5990b08ef0202b8570c03dbfd957e5057988fdcd93fcb0d15ee5f" exitCode=0 Nov 22 11:06:40 crc kubenswrapper[4926]: I1122 11:06:40.712705 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" event={"ID":"d4977b14-85c3-4141-9b15-1768f09e8d27","Type":"ContainerDied","Data":"e38f3f8166b5990b08ef0202b8570c03dbfd957e5057988fdcd93fcb0d15ee5f"} Nov 22 11:06:40 crc kubenswrapper[4926]: I1122 11:06:40.712955 4926 scope.go:117] "RemoveContainer" containerID="48831bc9d5675e008344df0cb1df4d3942248ba162e33c17e856f93d5faa1d3d" Nov 22 11:06:40 crc kubenswrapper[4926]: I1122 11:06:40.713543 4926 scope.go:117] "RemoveContainer" containerID="e38f3f8166b5990b08ef0202b8570c03dbfd957e5057988fdcd93fcb0d15ee5f" Nov 22 11:06:40 crc kubenswrapper[4926]: E1122 11:06:40.713765 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xr9nd_openshift-machine-config-operator(d4977b14-85c3-4141-9b15-1768f09e8d27)\"" pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" podUID="d4977b14-85c3-4141-9b15-1768f09e8d27" Nov 22 11:06:43 crc kubenswrapper[4926]: I1122 11:06:43.034343 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-db-create-9g6rc"] Nov 22 11:06:43 crc kubenswrapper[4926]: I1122 11:06:43.046420 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-1603-account-create-update-v8zj7"] Nov 22 11:06:43 crc kubenswrapper[4926]: I1122 11:06:43.057532 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" 
pods=["openstack/cinder-db-create-9g6rc"] Nov 22 11:06:43 crc kubenswrapper[4926]: I1122 11:06:43.065265 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-1603-account-create-update-v8zj7"] Nov 22 11:06:43 crc kubenswrapper[4926]: I1122 11:06:43.071862 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-db-create-chmwr"] Nov 22 11:06:43 crc kubenswrapper[4926]: I1122 11:06:43.078708 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-d48c-account-create-update-8kkx2"] Nov 22 11:06:43 crc kubenswrapper[4926]: I1122 11:06:43.086848 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-db-create-chmwr"] Nov 22 11:06:43 crc kubenswrapper[4926]: I1122 11:06:43.095084 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-d48c-account-create-update-8kkx2"] Nov 22 11:06:44 crc kubenswrapper[4926]: I1122 11:06:44.021137 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-db-create-v8z5c"] Nov 22 11:06:44 crc kubenswrapper[4926]: I1122 11:06:44.028695 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-db-create-v8z5c"] Nov 22 11:06:44 crc kubenswrapper[4926]: I1122 11:06:44.037052 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-619d-account-create-update-qwqzd"] Nov 22 11:06:44 crc kubenswrapper[4926]: I1122 11:06:44.044685 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-619d-account-create-update-qwqzd"] Nov 22 11:06:44 crc kubenswrapper[4926]: I1122 11:06:44.593496 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="48b1eac1-fa58-499b-ad3b-f76d66b4f471" path="/var/lib/kubelet/pods/48b1eac1-fa58-499b-ad3b-f76d66b4f471/volumes" Nov 22 11:06:44 crc kubenswrapper[4926]: I1122 11:06:44.594056 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="66f0276e-33a0-4b96-ae5d-866925c6310a" path="/var/lib/kubelet/pods/66f0276e-33a0-4b96-ae5d-866925c6310a/volumes" Nov 22 11:06:44 crc kubenswrapper[4926]: I1122 11:06:44.594574 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7a3652ee-cb8e-4f96-8953-ba9f3a4cef79" path="/var/lib/kubelet/pods/7a3652ee-cb8e-4f96-8953-ba9f3a4cef79/volumes" Nov 22 11:06:44 crc kubenswrapper[4926]: I1122 11:06:44.595122 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="960db37c-8997-460e-9f83-46d08b7597b1" path="/var/lib/kubelet/pods/960db37c-8997-460e-9f83-46d08b7597b1/volumes" Nov 22 11:06:44 crc kubenswrapper[4926]: I1122 11:06:44.596111 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="dcafcf70-6fc9-4c0c-bc53-8334a5eda59c" path="/var/lib/kubelet/pods/dcafcf70-6fc9-4c0c-bc53-8334a5eda59c/volumes" Nov 22 11:06:44 crc kubenswrapper[4926]: I1122 11:06:44.596609 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ebe92277-de4f-49bf-a390-a1a649e35b2a" path="/var/lib/kubelet/pods/ebe92277-de4f-49bf-a390-a1a649e35b2a/volumes" Nov 22 11:06:48 crc kubenswrapper[4926]: I1122 11:06:48.040963 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-db-sync-lbhdp"] Nov 22 11:06:48 crc kubenswrapper[4926]: I1122 11:06:48.060007 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-db-sync-lbhdp"] Nov 22 11:06:48 crc kubenswrapper[4926]: I1122 11:06:48.596480 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="dd927212-5062-4fb3-b93e-6804d57c251c" 
path="/var/lib/kubelet/pods/dd927212-5062-4fb3-b93e-6804d57c251c/volumes" Nov 22 11:06:50 crc kubenswrapper[4926]: I1122 11:06:50.869177 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-dl2kh"] Nov 22 11:06:50 crc kubenswrapper[4926]: I1122 11:06:50.872149 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-dl2kh" Nov 22 11:06:50 crc kubenswrapper[4926]: I1122 11:06:50.880084 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-dl2kh"] Nov 22 11:06:50 crc kubenswrapper[4926]: I1122 11:06:50.969783 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bbcsz\" (UniqueName: \"kubernetes.io/projected/5931b24a-a37e-4d64-bc97-bc71a3533e79-kube-api-access-bbcsz\") pod \"redhat-marketplace-dl2kh\" (UID: \"5931b24a-a37e-4d64-bc97-bc71a3533e79\") " pod="openshift-marketplace/redhat-marketplace-dl2kh" Nov 22 11:06:50 crc kubenswrapper[4926]: I1122 11:06:50.969950 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5931b24a-a37e-4d64-bc97-bc71a3533e79-utilities\") pod \"redhat-marketplace-dl2kh\" (UID: \"5931b24a-a37e-4d64-bc97-bc71a3533e79\") " pod="openshift-marketplace/redhat-marketplace-dl2kh" Nov 22 11:06:50 crc kubenswrapper[4926]: I1122 11:06:50.970014 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5931b24a-a37e-4d64-bc97-bc71a3533e79-catalog-content\") pod \"redhat-marketplace-dl2kh\" (UID: \"5931b24a-a37e-4d64-bc97-bc71a3533e79\") " pod="openshift-marketplace/redhat-marketplace-dl2kh" Nov 22 11:06:51 crc kubenswrapper[4926]: I1122 11:06:51.072281 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5931b24a-a37e-4d64-bc97-bc71a3533e79-utilities\") pod \"redhat-marketplace-dl2kh\" (UID: \"5931b24a-a37e-4d64-bc97-bc71a3533e79\") " pod="openshift-marketplace/redhat-marketplace-dl2kh" Nov 22 11:06:51 crc kubenswrapper[4926]: I1122 11:06:51.072345 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5931b24a-a37e-4d64-bc97-bc71a3533e79-catalog-content\") pod \"redhat-marketplace-dl2kh\" (UID: \"5931b24a-a37e-4d64-bc97-bc71a3533e79\") " pod="openshift-marketplace/redhat-marketplace-dl2kh" Nov 22 11:06:51 crc kubenswrapper[4926]: I1122 11:06:51.072468 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bbcsz\" (UniqueName: \"kubernetes.io/projected/5931b24a-a37e-4d64-bc97-bc71a3533e79-kube-api-access-bbcsz\") pod \"redhat-marketplace-dl2kh\" (UID: \"5931b24a-a37e-4d64-bc97-bc71a3533e79\") " pod="openshift-marketplace/redhat-marketplace-dl2kh" Nov 22 11:06:51 crc kubenswrapper[4926]: I1122 11:06:51.072765 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5931b24a-a37e-4d64-bc97-bc71a3533e79-utilities\") pod \"redhat-marketplace-dl2kh\" (UID: \"5931b24a-a37e-4d64-bc97-bc71a3533e79\") " pod="openshift-marketplace/redhat-marketplace-dl2kh" Nov 22 11:06:51 crc kubenswrapper[4926]: I1122 11:06:51.072954 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5931b24a-a37e-4d64-bc97-bc71a3533e79-catalog-content\") pod \"redhat-marketplace-dl2kh\" (UID: \"5931b24a-a37e-4d64-bc97-bc71a3533e79\") " pod="openshift-marketplace/redhat-marketplace-dl2kh" Nov 22 11:06:51 crc kubenswrapper[4926]: I1122 11:06:51.091129 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bbcsz\" (UniqueName: \"kubernetes.io/projected/5931b24a-a37e-4d64-bc97-bc71a3533e79-kube-api-access-bbcsz\") pod \"redhat-marketplace-dl2kh\" (UID: \"5931b24a-a37e-4d64-bc97-bc71a3533e79\") " pod="openshift-marketplace/redhat-marketplace-dl2kh" Nov 22 11:06:51 crc kubenswrapper[4926]: I1122 11:06:51.195123 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-dl2kh" Nov 22 11:06:51 crc kubenswrapper[4926]: I1122 11:06:51.581561 4926 scope.go:117] "RemoveContainer" containerID="e38f3f8166b5990b08ef0202b8570c03dbfd957e5057988fdcd93fcb0d15ee5f" Nov 22 11:06:51 crc kubenswrapper[4926]: E1122 11:06:51.582083 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xr9nd_openshift-machine-config-operator(d4977b14-85c3-4141-9b15-1768f09e8d27)\"" pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" podUID="d4977b14-85c3-4141-9b15-1768f09e8d27" Nov 22 11:06:51 crc kubenswrapper[4926]: I1122 11:06:51.675778 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-dl2kh"] Nov 22 11:06:51 crc kubenswrapper[4926]: I1122 11:06:51.825789 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-dl2kh" event={"ID":"5931b24a-a37e-4d64-bc97-bc71a3533e79","Type":"ContainerStarted","Data":"631daf409687f9c40e97385f5f193f0d44fb0a09ce36e545865fcc50a991cbf1"} Nov 22 11:06:52 crc kubenswrapper[4926]: I1122 11:06:52.838969 4926 generic.go:334] "Generic (PLEG): container finished" podID="5931b24a-a37e-4d64-bc97-bc71a3533e79" containerID="01f0bbfd05f2cabede9941f2fbee25fd9bf0c1cc605009968b28b4e5e5413733" exitCode=0 Nov 22 11:06:52 crc kubenswrapper[4926]: I1122 11:06:52.839062 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-dl2kh" event={"ID":"5931b24a-a37e-4d64-bc97-bc71a3533e79","Type":"ContainerDied","Data":"01f0bbfd05f2cabede9941f2fbee25fd9bf0c1cc605009968b28b4e5e5413733"} Nov 22 11:06:53 crc kubenswrapper[4926]: I1122 11:06:53.850137 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-dl2kh" event={"ID":"5931b24a-a37e-4d64-bc97-bc71a3533e79","Type":"ContainerStarted","Data":"fcde34024c2579552a92f69395c0e9ceac03aacc1ebd0bbb560692c10abcecec"} Nov 22 11:06:54 crc kubenswrapper[4926]: I1122 11:06:54.861476 4926 generic.go:334] "Generic (PLEG): container finished" podID="5931b24a-a37e-4d64-bc97-bc71a3533e79" containerID="fcde34024c2579552a92f69395c0e9ceac03aacc1ebd0bbb560692c10abcecec" exitCode=0 Nov 22 11:06:54 crc kubenswrapper[4926]: I1122 11:06:54.861547 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-dl2kh" event={"ID":"5931b24a-a37e-4d64-bc97-bc71a3533e79","Type":"ContainerDied","Data":"fcde34024c2579552a92f69395c0e9ceac03aacc1ebd0bbb560692c10abcecec"} Nov 22 11:06:55 crc kubenswrapper[4926]: I1122 11:06:55.873287 4926 
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-dl2kh" event={"ID":"5931b24a-a37e-4d64-bc97-bc71a3533e79","Type":"ContainerStarted","Data":"b949149423c3e25a85289e1dae9194dc753749bace9964e51a873239c6a1fc64"} Nov 22 11:06:55 crc kubenswrapper[4926]: I1122 11:06:55.896660 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-dl2kh" podStartSLOduration=3.464064928 podStartE2EDuration="5.896642479s" podCreationTimestamp="2025-11-22 11:06:50 +0000 UTC" firstStartedPulling="2025-11-22 11:06:52.841952185 +0000 UTC m=+1633.143557472" lastFinishedPulling="2025-11-22 11:06:55.274529736 +0000 UTC m=+1635.576135023" observedRunningTime="2025-11-22 11:06:55.890149493 +0000 UTC m=+1636.191754800" watchObservedRunningTime="2025-11-22 11:06:55.896642479 +0000 UTC m=+1636.198247766" Nov 22 11:07:01 crc kubenswrapper[4926]: I1122 11:07:01.196188 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-dl2kh" Nov 22 11:07:01 crc kubenswrapper[4926]: I1122 11:07:01.196740 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-dl2kh" Nov 22 11:07:01 crc kubenswrapper[4926]: I1122 11:07:01.265484 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-dl2kh" Nov 22 11:07:01 crc kubenswrapper[4926]: I1122 11:07:01.963349 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-dl2kh" Nov 22 11:07:02 crc kubenswrapper[4926]: I1122 11:07:02.011177 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-dl2kh"] Nov 22 11:07:03 crc kubenswrapper[4926]: I1122 11:07:03.942438 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-dl2kh" podUID="5931b24a-a37e-4d64-bc97-bc71a3533e79" containerName="registry-server" containerID="cri-o://b949149423c3e25a85289e1dae9194dc753749bace9964e51a873239c6a1fc64" gracePeriod=2 Nov 22 11:07:04 crc kubenswrapper[4926]: I1122 11:07:04.374063 4926 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-dl2kh" Nov 22 11:07:04 crc kubenswrapper[4926]: I1122 11:07:04.535212 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5931b24a-a37e-4d64-bc97-bc71a3533e79-utilities\") pod \"5931b24a-a37e-4d64-bc97-bc71a3533e79\" (UID: \"5931b24a-a37e-4d64-bc97-bc71a3533e79\") " Nov 22 11:07:04 crc kubenswrapper[4926]: I1122 11:07:04.535268 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5931b24a-a37e-4d64-bc97-bc71a3533e79-catalog-content\") pod \"5931b24a-a37e-4d64-bc97-bc71a3533e79\" (UID: \"5931b24a-a37e-4d64-bc97-bc71a3533e79\") " Nov 22 11:07:04 crc kubenswrapper[4926]: I1122 11:07:04.535296 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bbcsz\" (UniqueName: \"kubernetes.io/projected/5931b24a-a37e-4d64-bc97-bc71a3533e79-kube-api-access-bbcsz\") pod \"5931b24a-a37e-4d64-bc97-bc71a3533e79\" (UID: \"5931b24a-a37e-4d64-bc97-bc71a3533e79\") " Nov 22 11:07:04 crc kubenswrapper[4926]: I1122 11:07:04.536141 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5931b24a-a37e-4d64-bc97-bc71a3533e79-utilities" (OuterVolumeSpecName: "utilities") pod "5931b24a-a37e-4d64-bc97-bc71a3533e79" (UID: "5931b24a-a37e-4d64-bc97-bc71a3533e79"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 11:07:04 crc kubenswrapper[4926]: I1122 11:07:04.540331 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5931b24a-a37e-4d64-bc97-bc71a3533e79-kube-api-access-bbcsz" (OuterVolumeSpecName: "kube-api-access-bbcsz") pod "5931b24a-a37e-4d64-bc97-bc71a3533e79" (UID: "5931b24a-a37e-4d64-bc97-bc71a3533e79"). InnerVolumeSpecName "kube-api-access-bbcsz". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 11:07:04 crc kubenswrapper[4926]: I1122 11:07:04.554297 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5931b24a-a37e-4d64-bc97-bc71a3533e79-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "5931b24a-a37e-4d64-bc97-bc71a3533e79" (UID: "5931b24a-a37e-4d64-bc97-bc71a3533e79"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 11:07:04 crc kubenswrapper[4926]: I1122 11:07:04.582468 4926 scope.go:117] "RemoveContainer" containerID="e38f3f8166b5990b08ef0202b8570c03dbfd957e5057988fdcd93fcb0d15ee5f" Nov 22 11:07:04 crc kubenswrapper[4926]: E1122 11:07:04.582907 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xr9nd_openshift-machine-config-operator(d4977b14-85c3-4141-9b15-1768f09e8d27)\"" pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" podUID="d4977b14-85c3-4141-9b15-1768f09e8d27" Nov 22 11:07:04 crc kubenswrapper[4926]: I1122 11:07:04.637219 4926 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5931b24a-a37e-4d64-bc97-bc71a3533e79-utilities\") on node \"crc\" DevicePath \"\"" Nov 22 11:07:04 crc kubenswrapper[4926]: I1122 11:07:04.637257 4926 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5931b24a-a37e-4d64-bc97-bc71a3533e79-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 22 11:07:04 crc kubenswrapper[4926]: I1122 11:07:04.637274 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bbcsz\" (UniqueName: \"kubernetes.io/projected/5931b24a-a37e-4d64-bc97-bc71a3533e79-kube-api-access-bbcsz\") on node \"crc\" DevicePath \"\"" Nov 22 11:07:04 crc kubenswrapper[4926]: I1122 11:07:04.956963 4926 generic.go:334] "Generic (PLEG): container finished" podID="5931b24a-a37e-4d64-bc97-bc71a3533e79" containerID="b949149423c3e25a85289e1dae9194dc753749bace9964e51a873239c6a1fc64" exitCode=0 Nov 22 11:07:04 crc kubenswrapper[4926]: I1122 11:07:04.957012 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-dl2kh" event={"ID":"5931b24a-a37e-4d64-bc97-bc71a3533e79","Type":"ContainerDied","Data":"b949149423c3e25a85289e1dae9194dc753749bace9964e51a873239c6a1fc64"} Nov 22 11:07:04 crc kubenswrapper[4926]: I1122 11:07:04.957047 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-dl2kh" event={"ID":"5931b24a-a37e-4d64-bc97-bc71a3533e79","Type":"ContainerDied","Data":"631daf409687f9c40e97385f5f193f0d44fb0a09ce36e545865fcc50a991cbf1"} Nov 22 11:07:04 crc kubenswrapper[4926]: I1122 11:07:04.957068 4926 scope.go:117] "RemoveContainer" containerID="b949149423c3e25a85289e1dae9194dc753749bace9964e51a873239c6a1fc64" Nov 22 11:07:04 crc kubenswrapper[4926]: I1122 11:07:04.957079 4926 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-dl2kh" Nov 22 11:07:04 crc kubenswrapper[4926]: I1122 11:07:04.981960 4926 scope.go:117] "RemoveContainer" containerID="fcde34024c2579552a92f69395c0e9ceac03aacc1ebd0bbb560692c10abcecec" Nov 22 11:07:04 crc kubenswrapper[4926]: I1122 11:07:04.984811 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-dl2kh"] Nov 22 11:07:04 crc kubenswrapper[4926]: I1122 11:07:04.992625 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-dl2kh"] Nov 22 11:07:05 crc kubenswrapper[4926]: I1122 11:07:05.001411 4926 scope.go:117] "RemoveContainer" containerID="01f0bbfd05f2cabede9941f2fbee25fd9bf0c1cc605009968b28b4e5e5413733" Nov 22 11:07:05 crc kubenswrapper[4926]: I1122 11:07:05.052610 4926 scope.go:117] "RemoveContainer" containerID="b949149423c3e25a85289e1dae9194dc753749bace9964e51a873239c6a1fc64" Nov 22 11:07:05 crc kubenswrapper[4926]: E1122 11:07:05.055329 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b949149423c3e25a85289e1dae9194dc753749bace9964e51a873239c6a1fc64\": container with ID starting with b949149423c3e25a85289e1dae9194dc753749bace9964e51a873239c6a1fc64 not found: ID does not exist" containerID="b949149423c3e25a85289e1dae9194dc753749bace9964e51a873239c6a1fc64" Nov 22 11:07:05 crc kubenswrapper[4926]: I1122 11:07:05.055379 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b949149423c3e25a85289e1dae9194dc753749bace9964e51a873239c6a1fc64"} err="failed to get container status \"b949149423c3e25a85289e1dae9194dc753749bace9964e51a873239c6a1fc64\": rpc error: code = NotFound desc = could not find container \"b949149423c3e25a85289e1dae9194dc753749bace9964e51a873239c6a1fc64\": container with ID starting with b949149423c3e25a85289e1dae9194dc753749bace9964e51a873239c6a1fc64 not found: ID does not exist" Nov 22 11:07:05 crc kubenswrapper[4926]: I1122 11:07:05.055408 4926 scope.go:117] "RemoveContainer" containerID="fcde34024c2579552a92f69395c0e9ceac03aacc1ebd0bbb560692c10abcecec" Nov 22 11:07:05 crc kubenswrapper[4926]: E1122 11:07:05.055749 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"fcde34024c2579552a92f69395c0e9ceac03aacc1ebd0bbb560692c10abcecec\": container with ID starting with fcde34024c2579552a92f69395c0e9ceac03aacc1ebd0bbb560692c10abcecec not found: ID does not exist" containerID="fcde34024c2579552a92f69395c0e9ceac03aacc1ebd0bbb560692c10abcecec" Nov 22 11:07:05 crc kubenswrapper[4926]: I1122 11:07:05.055798 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fcde34024c2579552a92f69395c0e9ceac03aacc1ebd0bbb560692c10abcecec"} err="failed to get container status \"fcde34024c2579552a92f69395c0e9ceac03aacc1ebd0bbb560692c10abcecec\": rpc error: code = NotFound desc = could not find container \"fcde34024c2579552a92f69395c0e9ceac03aacc1ebd0bbb560692c10abcecec\": container with ID starting with fcde34024c2579552a92f69395c0e9ceac03aacc1ebd0bbb560692c10abcecec not found: ID does not exist" Nov 22 11:07:05 crc kubenswrapper[4926]: I1122 11:07:05.055828 4926 scope.go:117] "RemoveContainer" containerID="01f0bbfd05f2cabede9941f2fbee25fd9bf0c1cc605009968b28b4e5e5413733" Nov 22 11:07:05 crc kubenswrapper[4926]: E1122 11:07:05.056083 4926 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"01f0bbfd05f2cabede9941f2fbee25fd9bf0c1cc605009968b28b4e5e5413733\": container with ID starting with 01f0bbfd05f2cabede9941f2fbee25fd9bf0c1cc605009968b28b4e5e5413733 not found: ID does not exist" containerID="01f0bbfd05f2cabede9941f2fbee25fd9bf0c1cc605009968b28b4e5e5413733" Nov 22 11:07:05 crc kubenswrapper[4926]: I1122 11:07:05.056104 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"01f0bbfd05f2cabede9941f2fbee25fd9bf0c1cc605009968b28b4e5e5413733"} err="failed to get container status \"01f0bbfd05f2cabede9941f2fbee25fd9bf0c1cc605009968b28b4e5e5413733\": rpc error: code = NotFound desc = could not find container \"01f0bbfd05f2cabede9941f2fbee25fd9bf0c1cc605009968b28b4e5e5413733\": container with ID starting with 01f0bbfd05f2cabede9941f2fbee25fd9bf0c1cc605009968b28b4e5e5413733 not found: ID does not exist" Nov 22 11:07:06 crc kubenswrapper[4926]: I1122 11:07:06.593851 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5931b24a-a37e-4d64-bc97-bc71a3533e79" path="/var/lib/kubelet/pods/5931b24a-a37e-4d64-bc97-bc71a3533e79/volumes" Nov 22 11:07:18 crc kubenswrapper[4926]: I1122 11:07:18.582139 4926 scope.go:117] "RemoveContainer" containerID="e38f3f8166b5990b08ef0202b8570c03dbfd957e5057988fdcd93fcb0d15ee5f" Nov 22 11:07:18 crc kubenswrapper[4926]: E1122 11:07:18.582990 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xr9nd_openshift-machine-config-operator(d4977b14-85c3-4141-9b15-1768f09e8d27)\"" pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" podUID="d4977b14-85c3-4141-9b15-1768f09e8d27" Nov 22 11:07:20 crc kubenswrapper[4926]: I1122 11:07:20.563257 4926 scope.go:117] "RemoveContainer" containerID="04a51ac8e1698115caf042a93f113f50deb41ab5145e1e6d37b799736ef2cb52" Nov 22 11:07:20 crc kubenswrapper[4926]: I1122 11:07:20.610014 4926 scope.go:117] "RemoveContainer" containerID="ce0a4d5f5326f1f2b474d106d6f352c77b72bc88d3b0fffb6a0e5b959fb7d238" Nov 22 11:07:20 crc kubenswrapper[4926]: I1122 11:07:20.643821 4926 scope.go:117] "RemoveContainer" containerID="b0e3bdbb07fe25755c1e52530762c8d9446781e358ae051f6a85f9e88f545126" Nov 22 11:07:20 crc kubenswrapper[4926]: I1122 11:07:20.669839 4926 scope.go:117] "RemoveContainer" containerID="e41d7d056d21ace0640b804051c41cea9d2239f6e9948e5404521e3e489aa701" Nov 22 11:07:20 crc kubenswrapper[4926]: I1122 11:07:20.717557 4926 scope.go:117] "RemoveContainer" containerID="8442a9d52ad5c65e7b1a997b242b9e85f5bb066c19e108a3f1539379efbc1f2b" Nov 22 11:07:20 crc kubenswrapper[4926]: I1122 11:07:20.763814 4926 scope.go:117] "RemoveContainer" containerID="a05ee305fa9670ecd2cec4a8728d5462baa2581f1025b2c32c67ca9fe162b846" Nov 22 11:07:20 crc kubenswrapper[4926]: I1122 11:07:20.811538 4926 scope.go:117] "RemoveContainer" containerID="b0a75f89a583a31f06294d77b38484c29b8effc5db0ad2e8c255a7156a8d3c51" Nov 22 11:07:20 crc kubenswrapper[4926]: I1122 11:07:20.845784 4926 scope.go:117] "RemoveContainer" containerID="2afcf3d8cf96a3fa67f4252e2793b4ba5cc99357bd37ac57be630abf687f325b" Nov 22 11:07:27 crc kubenswrapper[4926]: I1122 11:07:27.068238 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-bootstrap-7fq8n"] Nov 22 11:07:27 crc kubenswrapper[4926]: I1122 11:07:27.079699 4926 kubelet.go:2437] "SyncLoop 
DELETE" source="api" pods=["openstack/neutron-db-sync-d6xxc"] Nov 22 11:07:27 crc kubenswrapper[4926]: I1122 11:07:27.089057 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-db-sync-5lxk9"] Nov 22 11:07:27 crc kubenswrapper[4926]: I1122 11:07:27.095836 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-bootstrap-7fq8n"] Nov 22 11:07:27 crc kubenswrapper[4926]: I1122 11:07:27.103001 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-db-sync-d6xxc"] Nov 22 11:07:27 crc kubenswrapper[4926]: I1122 11:07:27.109508 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-db-sync-5lxk9"] Nov 22 11:07:28 crc kubenswrapper[4926]: I1122 11:07:28.602448 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="116d9793-8efe-45b6-8a2c-5f4c990346ad" path="/var/lib/kubelet/pods/116d9793-8efe-45b6-8a2c-5f4c990346ad/volumes" Nov 22 11:07:28 crc kubenswrapper[4926]: I1122 11:07:28.603299 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6fa25590-aff0-48a4-ac01-3671555d7b1a" path="/var/lib/kubelet/pods/6fa25590-aff0-48a4-ac01-3671555d7b1a/volumes" Nov 22 11:07:28 crc kubenswrapper[4926]: I1122 11:07:28.604157 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bee0e7d2-310f-4b01-8f79-83f113613329" path="/var/lib/kubelet/pods/bee0e7d2-310f-4b01-8f79-83f113613329/volumes" Nov 22 11:07:33 crc kubenswrapper[4926]: I1122 11:07:33.582827 4926 scope.go:117] "RemoveContainer" containerID="e38f3f8166b5990b08ef0202b8570c03dbfd957e5057988fdcd93fcb0d15ee5f" Nov 22 11:07:33 crc kubenswrapper[4926]: E1122 11:07:33.583728 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xr9nd_openshift-machine-config-operator(d4977b14-85c3-4141-9b15-1768f09e8d27)\"" pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" podUID="d4977b14-85c3-4141-9b15-1768f09e8d27" Nov 22 11:07:38 crc kubenswrapper[4926]: I1122 11:07:38.316404 4926 generic.go:334] "Generic (PLEG): container finished" podID="ab012855-82a0-4f87-97a7-e3c2d1490dda" containerID="1565257d6216c7fa410364e81f3a039a8375519ba50152b35e329d38bdb6de1a" exitCode=0 Nov 22 11:07:38 crc kubenswrapper[4926]: I1122 11:07:38.316492 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-76jbg" event={"ID":"ab012855-82a0-4f87-97a7-e3c2d1490dda","Type":"ContainerDied","Data":"1565257d6216c7fa410364e81f3a039a8375519ba50152b35e329d38bdb6de1a"} Nov 22 11:07:39 crc kubenswrapper[4926]: I1122 11:07:39.711874 4926 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-76jbg" Nov 22 11:07:39 crc kubenswrapper[4926]: I1122 11:07:39.814763 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-h7ght\" (UniqueName: \"kubernetes.io/projected/ab012855-82a0-4f87-97a7-e3c2d1490dda-kube-api-access-h7ght\") pod \"ab012855-82a0-4f87-97a7-e3c2d1490dda\" (UID: \"ab012855-82a0-4f87-97a7-e3c2d1490dda\") " Nov 22 11:07:39 crc kubenswrapper[4926]: I1122 11:07:39.814825 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/ab012855-82a0-4f87-97a7-e3c2d1490dda-ssh-key\") pod \"ab012855-82a0-4f87-97a7-e3c2d1490dda\" (UID: \"ab012855-82a0-4f87-97a7-e3c2d1490dda\") " Nov 22 11:07:39 crc kubenswrapper[4926]: I1122 11:07:39.815191 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ab012855-82a0-4f87-97a7-e3c2d1490dda-inventory\") pod \"ab012855-82a0-4f87-97a7-e3c2d1490dda\" (UID: \"ab012855-82a0-4f87-97a7-e3c2d1490dda\") " Nov 22 11:07:39 crc kubenswrapper[4926]: I1122 11:07:39.821127 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ab012855-82a0-4f87-97a7-e3c2d1490dda-kube-api-access-h7ght" (OuterVolumeSpecName: "kube-api-access-h7ght") pod "ab012855-82a0-4f87-97a7-e3c2d1490dda" (UID: "ab012855-82a0-4f87-97a7-e3c2d1490dda"). InnerVolumeSpecName "kube-api-access-h7ght". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 11:07:39 crc kubenswrapper[4926]: I1122 11:07:39.849257 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ab012855-82a0-4f87-97a7-e3c2d1490dda-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "ab012855-82a0-4f87-97a7-e3c2d1490dda" (UID: "ab012855-82a0-4f87-97a7-e3c2d1490dda"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 11:07:39 crc kubenswrapper[4926]: I1122 11:07:39.858617 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ab012855-82a0-4f87-97a7-e3c2d1490dda-inventory" (OuterVolumeSpecName: "inventory") pod "ab012855-82a0-4f87-97a7-e3c2d1490dda" (UID: "ab012855-82a0-4f87-97a7-e3c2d1490dda"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 11:07:39 crc kubenswrapper[4926]: I1122 11:07:39.917054 4926 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ab012855-82a0-4f87-97a7-e3c2d1490dda-inventory\") on node \"crc\" DevicePath \"\"" Nov 22 11:07:39 crc kubenswrapper[4926]: I1122 11:07:39.917094 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-h7ght\" (UniqueName: \"kubernetes.io/projected/ab012855-82a0-4f87-97a7-e3c2d1490dda-kube-api-access-h7ght\") on node \"crc\" DevicePath \"\"" Nov 22 11:07:39 crc kubenswrapper[4926]: I1122 11:07:39.917107 4926 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/ab012855-82a0-4f87-97a7-e3c2d1490dda-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 22 11:07:40 crc kubenswrapper[4926]: I1122 11:07:40.339565 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-76jbg" event={"ID":"ab012855-82a0-4f87-97a7-e3c2d1490dda","Type":"ContainerDied","Data":"4acb716a27d78009783a57e53064bdd8c06bcbdeb98410409dbe1a25832f3f12"} Nov 22 11:07:40 crc kubenswrapper[4926]: I1122 11:07:40.340146 4926 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="4acb716a27d78009783a57e53064bdd8c06bcbdeb98410409dbe1a25832f3f12" Nov 22 11:07:40 crc kubenswrapper[4926]: I1122 11:07:40.340252 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-76jbg" Nov 22 11:07:40 crc kubenswrapper[4926]: I1122 11:07:40.543365 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/validate-network-edpm-deployment-openstack-edpm-ipam-bj2hp"] Nov 22 11:07:40 crc kubenswrapper[4926]: E1122 11:07:40.544024 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ab012855-82a0-4f87-97a7-e3c2d1490dda" containerName="configure-network-edpm-deployment-openstack-edpm-ipam" Nov 22 11:07:40 crc kubenswrapper[4926]: I1122 11:07:40.544053 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="ab012855-82a0-4f87-97a7-e3c2d1490dda" containerName="configure-network-edpm-deployment-openstack-edpm-ipam" Nov 22 11:07:40 crc kubenswrapper[4926]: E1122 11:07:40.544090 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5931b24a-a37e-4d64-bc97-bc71a3533e79" containerName="registry-server" Nov 22 11:07:40 crc kubenswrapper[4926]: I1122 11:07:40.544103 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="5931b24a-a37e-4d64-bc97-bc71a3533e79" containerName="registry-server" Nov 22 11:07:40 crc kubenswrapper[4926]: E1122 11:07:40.544127 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5931b24a-a37e-4d64-bc97-bc71a3533e79" containerName="extract-content" Nov 22 11:07:40 crc kubenswrapper[4926]: I1122 11:07:40.544138 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="5931b24a-a37e-4d64-bc97-bc71a3533e79" containerName="extract-content" Nov 22 11:07:40 crc kubenswrapper[4926]: E1122 11:07:40.544188 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5931b24a-a37e-4d64-bc97-bc71a3533e79" containerName="extract-utilities" Nov 22 11:07:40 crc kubenswrapper[4926]: I1122 11:07:40.544201 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="5931b24a-a37e-4d64-bc97-bc71a3533e79" containerName="extract-utilities" Nov 22 11:07:40 crc kubenswrapper[4926]: I1122 11:07:40.544535 4926 
memory_manager.go:354] "RemoveStaleState removing state" podUID="5931b24a-a37e-4d64-bc97-bc71a3533e79" containerName="registry-server" Nov 22 11:07:40 crc kubenswrapper[4926]: I1122 11:07:40.544587 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="ab012855-82a0-4f87-97a7-e3c2d1490dda" containerName="configure-network-edpm-deployment-openstack-edpm-ipam" Nov 22 11:07:40 crc kubenswrapper[4926]: I1122 11:07:40.545622 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-bj2hp" Nov 22 11:07:40 crc kubenswrapper[4926]: I1122 11:07:40.547475 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 22 11:07:40 crc kubenswrapper[4926]: I1122 11:07:40.547591 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-lscnm" Nov 22 11:07:40 crc kubenswrapper[4926]: I1122 11:07:40.547857 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 22 11:07:40 crc kubenswrapper[4926]: I1122 11:07:40.548649 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 22 11:07:40 crc kubenswrapper[4926]: I1122 11:07:40.556847 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/validate-network-edpm-deployment-openstack-edpm-ipam-bj2hp"] Nov 22 11:07:40 crc kubenswrapper[4926]: I1122 11:07:40.629604 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-knbst\" (UniqueName: \"kubernetes.io/projected/ea024223-c658-4c22-9318-8eb14052b38f-kube-api-access-knbst\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-bj2hp\" (UID: \"ea024223-c658-4c22-9318-8eb14052b38f\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-bj2hp" Nov 22 11:07:40 crc kubenswrapper[4926]: I1122 11:07:40.629694 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ea024223-c658-4c22-9318-8eb14052b38f-inventory\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-bj2hp\" (UID: \"ea024223-c658-4c22-9318-8eb14052b38f\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-bj2hp" Nov 22 11:07:40 crc kubenswrapper[4926]: I1122 11:07:40.629727 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/ea024223-c658-4c22-9318-8eb14052b38f-ssh-key\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-bj2hp\" (UID: \"ea024223-c658-4c22-9318-8eb14052b38f\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-bj2hp" Nov 22 11:07:40 crc kubenswrapper[4926]: I1122 11:07:40.734306 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-knbst\" (UniqueName: \"kubernetes.io/projected/ea024223-c658-4c22-9318-8eb14052b38f-kube-api-access-knbst\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-bj2hp\" (UID: \"ea024223-c658-4c22-9318-8eb14052b38f\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-bj2hp" Nov 22 11:07:40 crc kubenswrapper[4926]: I1122 11:07:40.734447 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: 
\"kubernetes.io/secret/ea024223-c658-4c22-9318-8eb14052b38f-inventory\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-bj2hp\" (UID: \"ea024223-c658-4c22-9318-8eb14052b38f\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-bj2hp" Nov 22 11:07:40 crc kubenswrapper[4926]: I1122 11:07:40.734485 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/ea024223-c658-4c22-9318-8eb14052b38f-ssh-key\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-bj2hp\" (UID: \"ea024223-c658-4c22-9318-8eb14052b38f\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-bj2hp" Nov 22 11:07:40 crc kubenswrapper[4926]: I1122 11:07:40.738956 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ea024223-c658-4c22-9318-8eb14052b38f-inventory\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-bj2hp\" (UID: \"ea024223-c658-4c22-9318-8eb14052b38f\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-bj2hp" Nov 22 11:07:40 crc kubenswrapper[4926]: I1122 11:07:40.742866 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/ea024223-c658-4c22-9318-8eb14052b38f-ssh-key\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-bj2hp\" (UID: \"ea024223-c658-4c22-9318-8eb14052b38f\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-bj2hp" Nov 22 11:07:40 crc kubenswrapper[4926]: I1122 11:07:40.756913 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-knbst\" (UniqueName: \"kubernetes.io/projected/ea024223-c658-4c22-9318-8eb14052b38f-kube-api-access-knbst\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-bj2hp\" (UID: \"ea024223-c658-4c22-9318-8eb14052b38f\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-bj2hp" Nov 22 11:07:40 crc kubenswrapper[4926]: I1122 11:07:40.871282 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-bj2hp" Nov 22 11:07:41 crc kubenswrapper[4926]: I1122 11:07:41.402440 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/validate-network-edpm-deployment-openstack-edpm-ipam-bj2hp"] Nov 22 11:07:41 crc kubenswrapper[4926]: I1122 11:07:41.934033 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 22 11:07:42 crc kubenswrapper[4926]: I1122 11:07:42.365446 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-bj2hp" event={"ID":"ea024223-c658-4c22-9318-8eb14052b38f","Type":"ContainerStarted","Data":"73ac4847c028ab739b9061383bfb2ef14a2663c558c3780f19652a1234a66e4a"} Nov 22 11:07:42 crc kubenswrapper[4926]: I1122 11:07:42.365730 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-bj2hp" event={"ID":"ea024223-c658-4c22-9318-8eb14052b38f","Type":"ContainerStarted","Data":"741761e204644e1194a73fa370e953eacf9c63b600724be7b4c84d46b84418cf"} Nov 22 11:07:42 crc kubenswrapper[4926]: I1122 11:07:42.385846 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-bj2hp" podStartSLOduration=1.862879574 podStartE2EDuration="2.385831048s" podCreationTimestamp="2025-11-22 11:07:40 +0000 UTC" firstStartedPulling="2025-11-22 11:07:41.407593108 +0000 UTC m=+1681.709198385" lastFinishedPulling="2025-11-22 11:07:41.930544562 +0000 UTC m=+1682.232149859" observedRunningTime="2025-11-22 11:07:42.385707225 +0000 UTC m=+1682.687312512" watchObservedRunningTime="2025-11-22 11:07:42.385831048 +0000 UTC m=+1682.687436335" Nov 22 11:07:44 crc kubenswrapper[4926]: I1122 11:07:44.041730 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-db-sync-bw5rr"] Nov 22 11:07:44 crc kubenswrapper[4926]: I1122 11:07:44.051096 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-db-sync-bw5rr"] Nov 22 11:07:44 crc kubenswrapper[4926]: I1122 11:07:44.596409 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="05bbddb1-c370-4805-9f91-373535d67f52" path="/var/lib/kubelet/pods/05bbddb1-c370-4805-9f91-373535d67f52/volumes" Nov 22 11:07:46 crc kubenswrapper[4926]: I1122 11:07:46.029814 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-db-sync-fh2gk"] Nov 22 11:07:46 crc kubenswrapper[4926]: I1122 11:07:46.063182 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-db-sync-fh2gk"] Nov 22 11:07:46 crc kubenswrapper[4926]: I1122 11:07:46.591633 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="63e63df0-e7ff-46a2-9b1d-60be115851ce" path="/var/lib/kubelet/pods/63e63df0-e7ff-46a2-9b1d-60be115851ce/volumes" Nov 22 11:07:47 crc kubenswrapper[4926]: I1122 11:07:47.417797 4926 generic.go:334] "Generic (PLEG): container finished" podID="ea024223-c658-4c22-9318-8eb14052b38f" containerID="73ac4847c028ab739b9061383bfb2ef14a2663c558c3780f19652a1234a66e4a" exitCode=0 Nov 22 11:07:47 crc kubenswrapper[4926]: I1122 11:07:47.417846 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-bj2hp" event={"ID":"ea024223-c658-4c22-9318-8eb14052b38f","Type":"ContainerDied","Data":"73ac4847c028ab739b9061383bfb2ef14a2663c558c3780f19652a1234a66e4a"} Nov 22 11:07:48 
crc kubenswrapper[4926]: I1122 11:07:48.582966 4926 scope.go:117] "RemoveContainer" containerID="e38f3f8166b5990b08ef0202b8570c03dbfd957e5057988fdcd93fcb0d15ee5f" Nov 22 11:07:48 crc kubenswrapper[4926]: E1122 11:07:48.583543 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xr9nd_openshift-machine-config-operator(d4977b14-85c3-4141-9b15-1768f09e8d27)\"" pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" podUID="d4977b14-85c3-4141-9b15-1768f09e8d27" Nov 22 11:07:48 crc kubenswrapper[4926]: I1122 11:07:48.820957 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-bj2hp" Nov 22 11:07:49 crc kubenswrapper[4926]: I1122 11:07:49.002567 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/ea024223-c658-4c22-9318-8eb14052b38f-ssh-key\") pod \"ea024223-c658-4c22-9318-8eb14052b38f\" (UID: \"ea024223-c658-4c22-9318-8eb14052b38f\") " Nov 22 11:07:49 crc kubenswrapper[4926]: I1122 11:07:49.002675 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-knbst\" (UniqueName: \"kubernetes.io/projected/ea024223-c658-4c22-9318-8eb14052b38f-kube-api-access-knbst\") pod \"ea024223-c658-4c22-9318-8eb14052b38f\" (UID: \"ea024223-c658-4c22-9318-8eb14052b38f\") " Nov 22 11:07:49 crc kubenswrapper[4926]: I1122 11:07:49.002875 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ea024223-c658-4c22-9318-8eb14052b38f-inventory\") pod \"ea024223-c658-4c22-9318-8eb14052b38f\" (UID: \"ea024223-c658-4c22-9318-8eb14052b38f\") " Nov 22 11:07:49 crc kubenswrapper[4926]: I1122 11:07:49.008176 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ea024223-c658-4c22-9318-8eb14052b38f-kube-api-access-knbst" (OuterVolumeSpecName: "kube-api-access-knbst") pod "ea024223-c658-4c22-9318-8eb14052b38f" (UID: "ea024223-c658-4c22-9318-8eb14052b38f"). InnerVolumeSpecName "kube-api-access-knbst". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 11:07:49 crc kubenswrapper[4926]: I1122 11:07:49.028727 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ea024223-c658-4c22-9318-8eb14052b38f-inventory" (OuterVolumeSpecName: "inventory") pod "ea024223-c658-4c22-9318-8eb14052b38f" (UID: "ea024223-c658-4c22-9318-8eb14052b38f"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 11:07:49 crc kubenswrapper[4926]: I1122 11:07:49.035580 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ea024223-c658-4c22-9318-8eb14052b38f-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "ea024223-c658-4c22-9318-8eb14052b38f" (UID: "ea024223-c658-4c22-9318-8eb14052b38f"). InnerVolumeSpecName "ssh-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 11:07:49 crc kubenswrapper[4926]: I1122 11:07:49.105314 4926 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/ea024223-c658-4c22-9318-8eb14052b38f-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 22 11:07:49 crc kubenswrapper[4926]: I1122 11:07:49.105362 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-knbst\" (UniqueName: \"kubernetes.io/projected/ea024223-c658-4c22-9318-8eb14052b38f-kube-api-access-knbst\") on node \"crc\" DevicePath \"\"" Nov 22 11:07:49 crc kubenswrapper[4926]: I1122 11:07:49.105376 4926 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ea024223-c658-4c22-9318-8eb14052b38f-inventory\") on node \"crc\" DevicePath \"\"" Nov 22 11:07:49 crc kubenswrapper[4926]: I1122 11:07:49.439006 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-bj2hp" event={"ID":"ea024223-c658-4c22-9318-8eb14052b38f","Type":"ContainerDied","Data":"741761e204644e1194a73fa370e953eacf9c63b600724be7b4c84d46b84418cf"} Nov 22 11:07:49 crc kubenswrapper[4926]: I1122 11:07:49.439044 4926 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="741761e204644e1194a73fa370e953eacf9c63b600724be7b4c84d46b84418cf" Nov 22 11:07:49 crc kubenswrapper[4926]: I1122 11:07:49.439047 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-bj2hp" Nov 22 11:07:49 crc kubenswrapper[4926]: I1122 11:07:49.506090 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/install-os-edpm-deployment-openstack-edpm-ipam-jv8ww"] Nov 22 11:07:49 crc kubenswrapper[4926]: E1122 11:07:49.506456 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ea024223-c658-4c22-9318-8eb14052b38f" containerName="validate-network-edpm-deployment-openstack-edpm-ipam" Nov 22 11:07:49 crc kubenswrapper[4926]: I1122 11:07:49.506473 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="ea024223-c658-4c22-9318-8eb14052b38f" containerName="validate-network-edpm-deployment-openstack-edpm-ipam" Nov 22 11:07:49 crc kubenswrapper[4926]: I1122 11:07:49.506731 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="ea024223-c658-4c22-9318-8eb14052b38f" containerName="validate-network-edpm-deployment-openstack-edpm-ipam" Nov 22 11:07:49 crc kubenswrapper[4926]: I1122 11:07:49.507506 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-jv8ww" Nov 22 11:07:49 crc kubenswrapper[4926]: I1122 11:07:49.516043 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-lscnm" Nov 22 11:07:49 crc kubenswrapper[4926]: I1122 11:07:49.516101 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 22 11:07:49 crc kubenswrapper[4926]: I1122 11:07:49.516412 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 22 11:07:49 crc kubenswrapper[4926]: I1122 11:07:49.517411 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 22 11:07:49 crc kubenswrapper[4926]: I1122 11:07:49.529584 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/install-os-edpm-deployment-openstack-edpm-ipam-jv8ww"] Nov 22 11:07:49 crc kubenswrapper[4926]: I1122 11:07:49.612968 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/4cd61881-efff-46ae-a9b8-ba641538d8e1-inventory\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-jv8ww\" (UID: \"4cd61881-efff-46ae-a9b8-ba641538d8e1\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-jv8ww" Nov 22 11:07:49 crc kubenswrapper[4926]: I1122 11:07:49.613032 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/4cd61881-efff-46ae-a9b8-ba641538d8e1-ssh-key\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-jv8ww\" (UID: \"4cd61881-efff-46ae-a9b8-ba641538d8e1\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-jv8ww" Nov 22 11:07:49 crc kubenswrapper[4926]: I1122 11:07:49.613359 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wxxvg\" (UniqueName: \"kubernetes.io/projected/4cd61881-efff-46ae-a9b8-ba641538d8e1-kube-api-access-wxxvg\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-jv8ww\" (UID: \"4cd61881-efff-46ae-a9b8-ba641538d8e1\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-jv8ww" Nov 22 11:07:49 crc kubenswrapper[4926]: I1122 11:07:49.715301 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wxxvg\" (UniqueName: \"kubernetes.io/projected/4cd61881-efff-46ae-a9b8-ba641538d8e1-kube-api-access-wxxvg\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-jv8ww\" (UID: \"4cd61881-efff-46ae-a9b8-ba641538d8e1\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-jv8ww" Nov 22 11:07:49 crc kubenswrapper[4926]: I1122 11:07:49.715399 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/4cd61881-efff-46ae-a9b8-ba641538d8e1-inventory\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-jv8ww\" (UID: \"4cd61881-efff-46ae-a9b8-ba641538d8e1\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-jv8ww" Nov 22 11:07:49 crc kubenswrapper[4926]: I1122 11:07:49.715424 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/4cd61881-efff-46ae-a9b8-ba641538d8e1-ssh-key\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-jv8ww\" (UID: 
\"4cd61881-efff-46ae-a9b8-ba641538d8e1\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-jv8ww" Nov 22 11:07:49 crc kubenswrapper[4926]: I1122 11:07:49.718799 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/4cd61881-efff-46ae-a9b8-ba641538d8e1-inventory\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-jv8ww\" (UID: \"4cd61881-efff-46ae-a9b8-ba641538d8e1\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-jv8ww" Nov 22 11:07:49 crc kubenswrapper[4926]: I1122 11:07:49.722365 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/4cd61881-efff-46ae-a9b8-ba641538d8e1-ssh-key\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-jv8ww\" (UID: \"4cd61881-efff-46ae-a9b8-ba641538d8e1\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-jv8ww" Nov 22 11:07:49 crc kubenswrapper[4926]: I1122 11:07:49.731897 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wxxvg\" (UniqueName: \"kubernetes.io/projected/4cd61881-efff-46ae-a9b8-ba641538d8e1-kube-api-access-wxxvg\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-jv8ww\" (UID: \"4cd61881-efff-46ae-a9b8-ba641538d8e1\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-jv8ww" Nov 22 11:07:49 crc kubenswrapper[4926]: I1122 11:07:49.832445 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-jv8ww" Nov 22 11:07:50 crc kubenswrapper[4926]: I1122 11:07:50.373443 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/install-os-edpm-deployment-openstack-edpm-ipam-jv8ww"] Nov 22 11:07:50 crc kubenswrapper[4926]: I1122 11:07:50.449023 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-jv8ww" event={"ID":"4cd61881-efff-46ae-a9b8-ba641538d8e1","Type":"ContainerStarted","Data":"330f220503bb05a96ff76e1cd721804e342e88d173f068a381e734b048da36a5"} Nov 22 11:07:51 crc kubenswrapper[4926]: I1122 11:07:51.462878 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-jv8ww" event={"ID":"4cd61881-efff-46ae-a9b8-ba641538d8e1","Type":"ContainerStarted","Data":"a664f061fc1eca021425ff0dd6052a5de2eb616e3d6a8fc52b9b833b58727c5e"} Nov 22 11:07:51 crc kubenswrapper[4926]: I1122 11:07:51.480664 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-jv8ww" podStartSLOduration=2.048597497 podStartE2EDuration="2.480646812s" podCreationTimestamp="2025-11-22 11:07:49 +0000 UTC" firstStartedPulling="2025-11-22 11:07:50.389778376 +0000 UTC m=+1690.691383663" lastFinishedPulling="2025-11-22 11:07:50.821827671 +0000 UTC m=+1691.123432978" observedRunningTime="2025-11-22 11:07:51.478438788 +0000 UTC m=+1691.780044145" watchObservedRunningTime="2025-11-22 11:07:51.480646812 +0000 UTC m=+1691.782252099" Nov 22 11:08:01 crc kubenswrapper[4926]: I1122 11:08:01.581530 4926 scope.go:117] "RemoveContainer" containerID="e38f3f8166b5990b08ef0202b8570c03dbfd957e5057988fdcd93fcb0d15ee5f" Nov 22 11:08:01 crc kubenswrapper[4926]: E1122 11:08:01.582255 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed 
container=machine-config-daemon pod=machine-config-daemon-xr9nd_openshift-machine-config-operator(d4977b14-85c3-4141-9b15-1768f09e8d27)\"" pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" podUID="d4977b14-85c3-4141-9b15-1768f09e8d27" Nov 22 11:08:16 crc kubenswrapper[4926]: I1122 11:08:16.582142 4926 scope.go:117] "RemoveContainer" containerID="e38f3f8166b5990b08ef0202b8570c03dbfd957e5057988fdcd93fcb0d15ee5f" Nov 22 11:08:16 crc kubenswrapper[4926]: E1122 11:08:16.582763 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xr9nd_openshift-machine-config-operator(d4977b14-85c3-4141-9b15-1768f09e8d27)\"" pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" podUID="d4977b14-85c3-4141-9b15-1768f09e8d27" Nov 22 11:08:20 crc kubenswrapper[4926]: I1122 11:08:20.987487 4926 scope.go:117] "RemoveContainer" containerID="41e0bea0065c6eacfb4160378ac61f9c77f893c6bc4fcb6785fb1a768f7faa4d" Nov 22 11:08:21 crc kubenswrapper[4926]: I1122 11:08:21.025797 4926 scope.go:117] "RemoveContainer" containerID="14cb77830667d8b49447cc008aa373e5dd08cbbdab2bb701152d606c6682be4c" Nov 22 11:08:21 crc kubenswrapper[4926]: I1122 11:08:21.073135 4926 scope.go:117] "RemoveContainer" containerID="0356bf16a5cb63a8ceac47397e77e5a730a5a9ae12746b24f7d325954c0874f7" Nov 22 11:08:21 crc kubenswrapper[4926]: I1122 11:08:21.127834 4926 scope.go:117] "RemoveContainer" containerID="7ecf40dc56bebd6106020e390ebded6bc6a7510562fa3d0a926841e9514a17d0" Nov 22 11:08:21 crc kubenswrapper[4926]: I1122 11:08:21.182148 4926 scope.go:117] "RemoveContainer" containerID="652341ca9c543b35f54c8c72a1597769830475881f229f288f558046f98cdc1b" Nov 22 11:08:27 crc kubenswrapper[4926]: I1122 11:08:27.052272 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-adc5-account-create-update-nk5jm"] Nov 22 11:08:27 crc kubenswrapper[4926]: I1122 11:08:27.060626 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-c378-account-create-update-8k259"] Nov 22 11:08:27 crc kubenswrapper[4926]: I1122 11:08:27.071365 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-e910-account-create-update-xt8jl"] Nov 22 11:08:27 crc kubenswrapper[4926]: I1122 11:08:27.095169 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-db-create-26xp6"] Nov 22 11:08:27 crc kubenswrapper[4926]: I1122 11:08:27.102577 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-c378-account-create-update-8k259"] Nov 22 11:08:27 crc kubenswrapper[4926]: I1122 11:08:27.110452 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-e910-account-create-update-xt8jl"] Nov 22 11:08:27 crc kubenswrapper[4926]: I1122 11:08:27.118360 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-db-create-vnfm7"] Nov 22 11:08:27 crc kubenswrapper[4926]: I1122 11:08:27.125468 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-adc5-account-create-update-nk5jm"] Nov 22 11:08:27 crc kubenswrapper[4926]: I1122 11:08:27.135951 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-db-create-26xp6"] Nov 22 11:08:27 crc kubenswrapper[4926]: I1122 11:08:27.143537 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-db-create-vnfm7"] Nov 22 11:08:27 crc 
kubenswrapper[4926]: I1122 11:08:27.807295 4926 generic.go:334] "Generic (PLEG): container finished" podID="4cd61881-efff-46ae-a9b8-ba641538d8e1" containerID="a664f061fc1eca021425ff0dd6052a5de2eb616e3d6a8fc52b9b833b58727c5e" exitCode=0 Nov 22 11:08:27 crc kubenswrapper[4926]: I1122 11:08:27.807378 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-jv8ww" event={"ID":"4cd61881-efff-46ae-a9b8-ba641538d8e1","Type":"ContainerDied","Data":"a664f061fc1eca021425ff0dd6052a5de2eb616e3d6a8fc52b9b833b58727c5e"} Nov 22 11:08:28 crc kubenswrapper[4926]: I1122 11:08:28.047164 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-db-create-wpt7g"] Nov 22 11:08:28 crc kubenswrapper[4926]: I1122 11:08:28.058942 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-db-create-wpt7g"] Nov 22 11:08:28 crc kubenswrapper[4926]: I1122 11:08:28.600696 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="098e9322-614e-4c4a-9fca-6479e57018d7" path="/var/lib/kubelet/pods/098e9322-614e-4c4a-9fca-6479e57018d7/volumes" Nov 22 11:08:28 crc kubenswrapper[4926]: I1122 11:08:28.602192 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="219be0fb-9881-4029-8ab1-03a46157cd21" path="/var/lib/kubelet/pods/219be0fb-9881-4029-8ab1-03a46157cd21/volumes" Nov 22 11:08:28 crc kubenswrapper[4926]: I1122 11:08:28.603283 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="30ad455a-8882-418d-ac1f-4ba80ba554af" path="/var/lib/kubelet/pods/30ad455a-8882-418d-ac1f-4ba80ba554af/volumes" Nov 22 11:08:28 crc kubenswrapper[4926]: I1122 11:08:28.604468 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9e45e318-c0a2-473e-8390-aaa51821b3f8" path="/var/lib/kubelet/pods/9e45e318-c0a2-473e-8390-aaa51821b3f8/volumes" Nov 22 11:08:28 crc kubenswrapper[4926]: I1122 11:08:28.606794 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b2c9d3cf-6437-4b11-95b0-437c2d2eed0b" path="/var/lib/kubelet/pods/b2c9d3cf-6437-4b11-95b0-437c2d2eed0b/volumes" Nov 22 11:08:28 crc kubenswrapper[4926]: I1122 11:08:28.608101 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d09ba1b7-6aa2-4c58-a281-1cee30d6c27d" path="/var/lib/kubelet/pods/d09ba1b7-6aa2-4c58-a281-1cee30d6c27d/volumes" Nov 22 11:08:29 crc kubenswrapper[4926]: I1122 11:08:29.165792 4926 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-jv8ww" Nov 22 11:08:29 crc kubenswrapper[4926]: I1122 11:08:29.307545 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/4cd61881-efff-46ae-a9b8-ba641538d8e1-ssh-key\") pod \"4cd61881-efff-46ae-a9b8-ba641538d8e1\" (UID: \"4cd61881-efff-46ae-a9b8-ba641538d8e1\") " Nov 22 11:08:29 crc kubenswrapper[4926]: I1122 11:08:29.307633 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/4cd61881-efff-46ae-a9b8-ba641538d8e1-inventory\") pod \"4cd61881-efff-46ae-a9b8-ba641538d8e1\" (UID: \"4cd61881-efff-46ae-a9b8-ba641538d8e1\") " Nov 22 11:08:29 crc kubenswrapper[4926]: I1122 11:08:29.307673 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wxxvg\" (UniqueName: \"kubernetes.io/projected/4cd61881-efff-46ae-a9b8-ba641538d8e1-kube-api-access-wxxvg\") pod \"4cd61881-efff-46ae-a9b8-ba641538d8e1\" (UID: \"4cd61881-efff-46ae-a9b8-ba641538d8e1\") " Nov 22 11:08:29 crc kubenswrapper[4926]: I1122 11:08:29.312936 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4cd61881-efff-46ae-a9b8-ba641538d8e1-kube-api-access-wxxvg" (OuterVolumeSpecName: "kube-api-access-wxxvg") pod "4cd61881-efff-46ae-a9b8-ba641538d8e1" (UID: "4cd61881-efff-46ae-a9b8-ba641538d8e1"). InnerVolumeSpecName "kube-api-access-wxxvg". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 11:08:29 crc kubenswrapper[4926]: I1122 11:08:29.333547 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4cd61881-efff-46ae-a9b8-ba641538d8e1-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "4cd61881-efff-46ae-a9b8-ba641538d8e1" (UID: "4cd61881-efff-46ae-a9b8-ba641538d8e1"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 11:08:29 crc kubenswrapper[4926]: I1122 11:08:29.334497 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4cd61881-efff-46ae-a9b8-ba641538d8e1-inventory" (OuterVolumeSpecName: "inventory") pod "4cd61881-efff-46ae-a9b8-ba641538d8e1" (UID: "4cd61881-efff-46ae-a9b8-ba641538d8e1"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 11:08:29 crc kubenswrapper[4926]: I1122 11:08:29.410169 4926 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/4cd61881-efff-46ae-a9b8-ba641538d8e1-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 22 11:08:29 crc kubenswrapper[4926]: I1122 11:08:29.410206 4926 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/4cd61881-efff-46ae-a9b8-ba641538d8e1-inventory\") on node \"crc\" DevicePath \"\"" Nov 22 11:08:29 crc kubenswrapper[4926]: I1122 11:08:29.410220 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wxxvg\" (UniqueName: \"kubernetes.io/projected/4cd61881-efff-46ae-a9b8-ba641538d8e1-kube-api-access-wxxvg\") on node \"crc\" DevicePath \"\"" Nov 22 11:08:29 crc kubenswrapper[4926]: I1122 11:08:29.583875 4926 scope.go:117] "RemoveContainer" containerID="e38f3f8166b5990b08ef0202b8570c03dbfd957e5057988fdcd93fcb0d15ee5f" Nov 22 11:08:29 crc kubenswrapper[4926]: E1122 11:08:29.585123 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xr9nd_openshift-machine-config-operator(d4977b14-85c3-4141-9b15-1768f09e8d27)\"" pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" podUID="d4977b14-85c3-4141-9b15-1768f09e8d27" Nov 22 11:08:29 crc kubenswrapper[4926]: I1122 11:08:29.824569 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-jv8ww" event={"ID":"4cd61881-efff-46ae-a9b8-ba641538d8e1","Type":"ContainerDied","Data":"330f220503bb05a96ff76e1cd721804e342e88d173f068a381e734b048da36a5"} Nov 22 11:08:29 crc kubenswrapper[4926]: I1122 11:08:29.824617 4926 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="330f220503bb05a96ff76e1cd721804e342e88d173f068a381e734b048da36a5" Nov 22 11:08:29 crc kubenswrapper[4926]: I1122 11:08:29.824617 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-jv8ww" Nov 22 11:08:29 crc kubenswrapper[4926]: I1122 11:08:29.906761 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/configure-os-edpm-deployment-openstack-edpm-ipam-mk5df"] Nov 22 11:08:29 crc kubenswrapper[4926]: E1122 11:08:29.907247 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4cd61881-efff-46ae-a9b8-ba641538d8e1" containerName="install-os-edpm-deployment-openstack-edpm-ipam" Nov 22 11:08:29 crc kubenswrapper[4926]: I1122 11:08:29.907271 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="4cd61881-efff-46ae-a9b8-ba641538d8e1" containerName="install-os-edpm-deployment-openstack-edpm-ipam" Nov 22 11:08:29 crc kubenswrapper[4926]: I1122 11:08:29.907504 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="4cd61881-efff-46ae-a9b8-ba641538d8e1" containerName="install-os-edpm-deployment-openstack-edpm-ipam" Nov 22 11:08:29 crc kubenswrapper[4926]: I1122 11:08:29.908308 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-mk5df" Nov 22 11:08:29 crc kubenswrapper[4926]: I1122 11:08:29.910751 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-lscnm" Nov 22 11:08:29 crc kubenswrapper[4926]: I1122 11:08:29.911069 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 22 11:08:29 crc kubenswrapper[4926]: I1122 11:08:29.911497 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 22 11:08:29 crc kubenswrapper[4926]: I1122 11:08:29.912575 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 22 11:08:29 crc kubenswrapper[4926]: I1122 11:08:29.928983 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-os-edpm-deployment-openstack-edpm-ipam-mk5df"] Nov 22 11:08:30 crc kubenswrapper[4926]: I1122 11:08:30.020519 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/e048ed0c-3971-4ed9-ba3a-ea97d4cd82fb-ssh-key\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-mk5df\" (UID: \"e048ed0c-3971-4ed9-ba3a-ea97d4cd82fb\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-mk5df" Nov 22 11:08:30 crc kubenswrapper[4926]: I1122 11:08:30.020879 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-r4s67\" (UniqueName: \"kubernetes.io/projected/e048ed0c-3971-4ed9-ba3a-ea97d4cd82fb-kube-api-access-r4s67\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-mk5df\" (UID: \"e048ed0c-3971-4ed9-ba3a-ea97d4cd82fb\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-mk5df" Nov 22 11:08:30 crc kubenswrapper[4926]: I1122 11:08:30.021067 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/e048ed0c-3971-4ed9-ba3a-ea97d4cd82fb-inventory\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-mk5df\" (UID: \"e048ed0c-3971-4ed9-ba3a-ea97d4cd82fb\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-mk5df" Nov 22 11:08:30 crc kubenswrapper[4926]: I1122 11:08:30.123068 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/e048ed0c-3971-4ed9-ba3a-ea97d4cd82fb-ssh-key\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-mk5df\" (UID: \"e048ed0c-3971-4ed9-ba3a-ea97d4cd82fb\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-mk5df" Nov 22 11:08:30 crc kubenswrapper[4926]: I1122 11:08:30.123181 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/e048ed0c-3971-4ed9-ba3a-ea97d4cd82fb-inventory\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-mk5df\" (UID: \"e048ed0c-3971-4ed9-ba3a-ea97d4cd82fb\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-mk5df" Nov 22 11:08:30 crc kubenswrapper[4926]: I1122 11:08:30.123232 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-r4s67\" (UniqueName: \"kubernetes.io/projected/e048ed0c-3971-4ed9-ba3a-ea97d4cd82fb-kube-api-access-r4s67\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-mk5df\" 
(UID: \"e048ed0c-3971-4ed9-ba3a-ea97d4cd82fb\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-mk5df" Nov 22 11:08:30 crc kubenswrapper[4926]: I1122 11:08:30.128588 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/e048ed0c-3971-4ed9-ba3a-ea97d4cd82fb-inventory\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-mk5df\" (UID: \"e048ed0c-3971-4ed9-ba3a-ea97d4cd82fb\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-mk5df" Nov 22 11:08:30 crc kubenswrapper[4926]: I1122 11:08:30.129412 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/e048ed0c-3971-4ed9-ba3a-ea97d4cd82fb-ssh-key\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-mk5df\" (UID: \"e048ed0c-3971-4ed9-ba3a-ea97d4cd82fb\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-mk5df" Nov 22 11:08:30 crc kubenswrapper[4926]: I1122 11:08:30.145325 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-r4s67\" (UniqueName: \"kubernetes.io/projected/e048ed0c-3971-4ed9-ba3a-ea97d4cd82fb-kube-api-access-r4s67\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-mk5df\" (UID: \"e048ed0c-3971-4ed9-ba3a-ea97d4cd82fb\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-mk5df" Nov 22 11:08:30 crc kubenswrapper[4926]: I1122 11:08:30.228377 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-mk5df" Nov 22 11:08:30 crc kubenswrapper[4926]: I1122 11:08:30.739631 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-os-edpm-deployment-openstack-edpm-ipam-mk5df"] Nov 22 11:08:30 crc kubenswrapper[4926]: I1122 11:08:30.834456 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-mk5df" event={"ID":"e048ed0c-3971-4ed9-ba3a-ea97d4cd82fb","Type":"ContainerStarted","Data":"333ccd6216a2034d1c056bebc159eca996ff7c847546d607ccda687b74f213a6"} Nov 22 11:08:31 crc kubenswrapper[4926]: I1122 11:08:31.844931 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-mk5df" event={"ID":"e048ed0c-3971-4ed9-ba3a-ea97d4cd82fb","Type":"ContainerStarted","Data":"b0c1f2f534767094020d7a4dcafdb083952ba6022bf4a361c425d65b212c3542"} Nov 22 11:08:31 crc kubenswrapper[4926]: I1122 11:08:31.866985 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-mk5df" podStartSLOduration=2.215163596 podStartE2EDuration="2.866951665s" podCreationTimestamp="2025-11-22 11:08:29 +0000 UTC" firstStartedPulling="2025-11-22 11:08:30.746794927 +0000 UTC m=+1731.048400214" lastFinishedPulling="2025-11-22 11:08:31.398582966 +0000 UTC m=+1731.700188283" observedRunningTime="2025-11-22 11:08:31.862545409 +0000 UTC m=+1732.164150736" watchObservedRunningTime="2025-11-22 11:08:31.866951665 +0000 UTC m=+1732.168556992" Nov 22 11:08:40 crc kubenswrapper[4926]: I1122 11:08:40.588434 4926 scope.go:117] "RemoveContainer" containerID="e38f3f8166b5990b08ef0202b8570c03dbfd957e5057988fdcd93fcb0d15ee5f" Nov 22 11:08:40 crc kubenswrapper[4926]: E1122 11:08:40.589286 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting 
failed container=machine-config-daemon pod=machine-config-daemon-xr9nd_openshift-machine-config-operator(d4977b14-85c3-4141-9b15-1768f09e8d27)\"" pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" podUID="d4977b14-85c3-4141-9b15-1768f09e8d27" Nov 22 11:08:54 crc kubenswrapper[4926]: I1122 11:08:54.582149 4926 scope.go:117] "RemoveContainer" containerID="e38f3f8166b5990b08ef0202b8570c03dbfd957e5057988fdcd93fcb0d15ee5f" Nov 22 11:08:54 crc kubenswrapper[4926]: E1122 11:08:54.582935 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xr9nd_openshift-machine-config-operator(d4977b14-85c3-4141-9b15-1768f09e8d27)\"" pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" podUID="d4977b14-85c3-4141-9b15-1768f09e8d27" Nov 22 11:08:59 crc kubenswrapper[4926]: I1122 11:08:59.050442 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-8kkgp"] Nov 22 11:08:59 crc kubenswrapper[4926]: I1122 11:08:59.061084 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-8kkgp"] Nov 22 11:09:00 crc kubenswrapper[4926]: I1122 11:09:00.594532 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6d43169d-e199-4fe3-85d7-c39acd736eb6" path="/var/lib/kubelet/pods/6d43169d-e199-4fe3-85d7-c39acd736eb6/volumes" Nov 22 11:09:09 crc kubenswrapper[4926]: I1122 11:09:09.583737 4926 scope.go:117] "RemoveContainer" containerID="e38f3f8166b5990b08ef0202b8570c03dbfd957e5057988fdcd93fcb0d15ee5f" Nov 22 11:09:09 crc kubenswrapper[4926]: E1122 11:09:09.584537 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xr9nd_openshift-machine-config-operator(d4977b14-85c3-4141-9b15-1768f09e8d27)\"" pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" podUID="d4977b14-85c3-4141-9b15-1768f09e8d27" Nov 22 11:09:16 crc kubenswrapper[4926]: I1122 11:09:16.032699 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-cell-mapping-kwhs5"] Nov 22 11:09:16 crc kubenswrapper[4926]: I1122 11:09:16.042493 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-cell-mapping-kwhs5"] Nov 22 11:09:16 crc kubenswrapper[4926]: I1122 11:09:16.593566 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="85f46bca-0fff-445a-9c52-c0f4478105ff" path="/var/lib/kubelet/pods/85f46bca-0fff-445a-9c52-c0f4478105ff/volumes" Nov 22 11:09:17 crc kubenswrapper[4926]: I1122 11:09:17.027705 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-vwr2z"] Nov 22 11:09:17 crc kubenswrapper[4926]: I1122 11:09:17.035666 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-vwr2z"] Nov 22 11:09:18 crc kubenswrapper[4926]: I1122 11:09:18.593227 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="89f5e6c8-c807-4ec1-80d1-7b0ac0192d38" path="/var/lib/kubelet/pods/89f5e6c8-c807-4ec1-80d1-7b0ac0192d38/volumes" Nov 22 11:09:21 crc kubenswrapper[4926]: I1122 11:09:21.304518 4926 generic.go:334] "Generic (PLEG): container finished" podID="e048ed0c-3971-4ed9-ba3a-ea97d4cd82fb" 
containerID="b0c1f2f534767094020d7a4dcafdb083952ba6022bf4a361c425d65b212c3542" exitCode=0 Nov 22 11:09:21 crc kubenswrapper[4926]: I1122 11:09:21.304566 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-mk5df" event={"ID":"e048ed0c-3971-4ed9-ba3a-ea97d4cd82fb","Type":"ContainerDied","Data":"b0c1f2f534767094020d7a4dcafdb083952ba6022bf4a361c425d65b212c3542"} Nov 22 11:09:21 crc kubenswrapper[4926]: I1122 11:09:21.340380 4926 scope.go:117] "RemoveContainer" containerID="4a1866c8fc59d617f6dd96e52249df564a92df3cd244b642800560f5dfe36561" Nov 22 11:09:21 crc kubenswrapper[4926]: I1122 11:09:21.379810 4926 scope.go:117] "RemoveContainer" containerID="09d69004abeb128e3b90c88330ccebdea80ff6ca598bdbce8e44986c3614821f" Nov 22 11:09:21 crc kubenswrapper[4926]: I1122 11:09:21.438905 4926 scope.go:117] "RemoveContainer" containerID="a265a4adc4f734e4f1e63b892e50a1371d9b186c0748c492f323cf616680143b" Nov 22 11:09:21 crc kubenswrapper[4926]: I1122 11:09:21.462775 4926 scope.go:117] "RemoveContainer" containerID="f7e428f06abb5ca1a34c54b719c039beedcb72b7a3cdef3bc77141e8798cd2de" Nov 22 11:09:21 crc kubenswrapper[4926]: I1122 11:09:21.500553 4926 scope.go:117] "RemoveContainer" containerID="6ea34daad8e79e0f8c03f6392333f7819c2a44de3e51b71bdfef454d111c56c0" Nov 22 11:09:21 crc kubenswrapper[4926]: I1122 11:09:21.571045 4926 scope.go:117] "RemoveContainer" containerID="51817e95860a44ff6c51a5a514b9d2620d7d561b8222d25088ebf15a8603c5de" Nov 22 11:09:21 crc kubenswrapper[4926]: I1122 11:09:21.582696 4926 scope.go:117] "RemoveContainer" containerID="e38f3f8166b5990b08ef0202b8570c03dbfd957e5057988fdcd93fcb0d15ee5f" Nov 22 11:09:21 crc kubenswrapper[4926]: E1122 11:09:21.582964 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xr9nd_openshift-machine-config-operator(d4977b14-85c3-4141-9b15-1768f09e8d27)\"" pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" podUID="d4977b14-85c3-4141-9b15-1768f09e8d27" Nov 22 11:09:21 crc kubenswrapper[4926]: I1122 11:09:21.598156 4926 scope.go:117] "RemoveContainer" containerID="f7a0c0e8acaff98b4b176b21ff6140648e238268a03e5e05399775af2365981b" Nov 22 11:09:21 crc kubenswrapper[4926]: I1122 11:09:21.617080 4926 scope.go:117] "RemoveContainer" containerID="47a1f2be6d44036898be3920ddd7a45f29eb3b3ddce8d18a7d2b15814ab77f88" Nov 22 11:09:21 crc kubenswrapper[4926]: I1122 11:09:21.640186 4926 scope.go:117] "RemoveContainer" containerID="cffb02ee83fc4e92af6189933f13586d0707da9320fab4ca476129e6fbc57eee" Nov 22 11:09:22 crc kubenswrapper[4926]: I1122 11:09:22.719149 4926 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-mk5df" Nov 22 11:09:22 crc kubenswrapper[4926]: I1122 11:09:22.845649 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/e048ed0c-3971-4ed9-ba3a-ea97d4cd82fb-ssh-key\") pod \"e048ed0c-3971-4ed9-ba3a-ea97d4cd82fb\" (UID: \"e048ed0c-3971-4ed9-ba3a-ea97d4cd82fb\") " Nov 22 11:09:22 crc kubenswrapper[4926]: I1122 11:09:22.845751 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-r4s67\" (UniqueName: \"kubernetes.io/projected/e048ed0c-3971-4ed9-ba3a-ea97d4cd82fb-kube-api-access-r4s67\") pod \"e048ed0c-3971-4ed9-ba3a-ea97d4cd82fb\" (UID: \"e048ed0c-3971-4ed9-ba3a-ea97d4cd82fb\") " Nov 22 11:09:22 crc kubenswrapper[4926]: I1122 11:09:22.845790 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/e048ed0c-3971-4ed9-ba3a-ea97d4cd82fb-inventory\") pod \"e048ed0c-3971-4ed9-ba3a-ea97d4cd82fb\" (UID: \"e048ed0c-3971-4ed9-ba3a-ea97d4cd82fb\") " Nov 22 11:09:22 crc kubenswrapper[4926]: I1122 11:09:22.854091 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e048ed0c-3971-4ed9-ba3a-ea97d4cd82fb-kube-api-access-r4s67" (OuterVolumeSpecName: "kube-api-access-r4s67") pod "e048ed0c-3971-4ed9-ba3a-ea97d4cd82fb" (UID: "e048ed0c-3971-4ed9-ba3a-ea97d4cd82fb"). InnerVolumeSpecName "kube-api-access-r4s67". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 11:09:22 crc kubenswrapper[4926]: I1122 11:09:22.874618 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e048ed0c-3971-4ed9-ba3a-ea97d4cd82fb-inventory" (OuterVolumeSpecName: "inventory") pod "e048ed0c-3971-4ed9-ba3a-ea97d4cd82fb" (UID: "e048ed0c-3971-4ed9-ba3a-ea97d4cd82fb"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 11:09:22 crc kubenswrapper[4926]: I1122 11:09:22.885037 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e048ed0c-3971-4ed9-ba3a-ea97d4cd82fb-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "e048ed0c-3971-4ed9-ba3a-ea97d4cd82fb" (UID: "e048ed0c-3971-4ed9-ba3a-ea97d4cd82fb"). InnerVolumeSpecName "ssh-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 11:09:22 crc kubenswrapper[4926]: I1122 11:09:22.948421 4926 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/e048ed0c-3971-4ed9-ba3a-ea97d4cd82fb-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 22 11:09:22 crc kubenswrapper[4926]: I1122 11:09:22.948467 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-r4s67\" (UniqueName: \"kubernetes.io/projected/e048ed0c-3971-4ed9-ba3a-ea97d4cd82fb-kube-api-access-r4s67\") on node \"crc\" DevicePath \"\"" Nov 22 11:09:22 crc kubenswrapper[4926]: I1122 11:09:22.948486 4926 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/e048ed0c-3971-4ed9-ba3a-ea97d4cd82fb-inventory\") on node \"crc\" DevicePath \"\"" Nov 22 11:09:23 crc kubenswrapper[4926]: I1122 11:09:23.323590 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-mk5df" event={"ID":"e048ed0c-3971-4ed9-ba3a-ea97d4cd82fb","Type":"ContainerDied","Data":"333ccd6216a2034d1c056bebc159eca996ff7c847546d607ccda687b74f213a6"} Nov 22 11:09:23 crc kubenswrapper[4926]: I1122 11:09:23.323627 4926 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="333ccd6216a2034d1c056bebc159eca996ff7c847546d607ccda687b74f213a6" Nov 22 11:09:23 crc kubenswrapper[4926]: I1122 11:09:23.323627 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-mk5df" Nov 22 11:09:23 crc kubenswrapper[4926]: I1122 11:09:23.406656 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ssh-known-hosts-edpm-deployment-fvg2t"] Nov 22 11:09:23 crc kubenswrapper[4926]: E1122 11:09:23.407076 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e048ed0c-3971-4ed9-ba3a-ea97d4cd82fb" containerName="configure-os-edpm-deployment-openstack-edpm-ipam" Nov 22 11:09:23 crc kubenswrapper[4926]: I1122 11:09:23.407092 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="e048ed0c-3971-4ed9-ba3a-ea97d4cd82fb" containerName="configure-os-edpm-deployment-openstack-edpm-ipam" Nov 22 11:09:23 crc kubenswrapper[4926]: I1122 11:09:23.407261 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="e048ed0c-3971-4ed9-ba3a-ea97d4cd82fb" containerName="configure-os-edpm-deployment-openstack-edpm-ipam" Nov 22 11:09:23 crc kubenswrapper[4926]: I1122 11:09:23.407835 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ssh-known-hosts-edpm-deployment-fvg2t" Nov 22 11:09:23 crc kubenswrapper[4926]: I1122 11:09:23.411614 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 22 11:09:23 crc kubenswrapper[4926]: I1122 11:09:23.411655 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 22 11:09:23 crc kubenswrapper[4926]: I1122 11:09:23.411851 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 22 11:09:23 crc kubenswrapper[4926]: I1122 11:09:23.412442 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-lscnm" Nov 22 11:09:23 crc kubenswrapper[4926]: I1122 11:09:23.420180 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ssh-known-hosts-edpm-deployment-fvg2t"] Nov 22 11:09:23 crc kubenswrapper[4926]: I1122 11:09:23.559403 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/474538df-0433-4fb9-b2c2-ed291078d237-inventory-0\") pod \"ssh-known-hosts-edpm-deployment-fvg2t\" (UID: \"474538df-0433-4fb9-b2c2-ed291078d237\") " pod="openstack/ssh-known-hosts-edpm-deployment-fvg2t" Nov 22 11:09:23 crc kubenswrapper[4926]: I1122 11:09:23.559632 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/474538df-0433-4fb9-b2c2-ed291078d237-ssh-key-openstack-edpm-ipam\") pod \"ssh-known-hosts-edpm-deployment-fvg2t\" (UID: \"474538df-0433-4fb9-b2c2-ed291078d237\") " pod="openstack/ssh-known-hosts-edpm-deployment-fvg2t" Nov 22 11:09:23 crc kubenswrapper[4926]: I1122 11:09:23.559868 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xrzp8\" (UniqueName: \"kubernetes.io/projected/474538df-0433-4fb9-b2c2-ed291078d237-kube-api-access-xrzp8\") pod \"ssh-known-hosts-edpm-deployment-fvg2t\" (UID: \"474538df-0433-4fb9-b2c2-ed291078d237\") " pod="openstack/ssh-known-hosts-edpm-deployment-fvg2t" Nov 22 11:09:23 crc kubenswrapper[4926]: I1122 11:09:23.662105 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/474538df-0433-4fb9-b2c2-ed291078d237-inventory-0\") pod \"ssh-known-hosts-edpm-deployment-fvg2t\" (UID: \"474538df-0433-4fb9-b2c2-ed291078d237\") " pod="openstack/ssh-known-hosts-edpm-deployment-fvg2t" Nov 22 11:09:23 crc kubenswrapper[4926]: I1122 11:09:23.662183 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/474538df-0433-4fb9-b2c2-ed291078d237-ssh-key-openstack-edpm-ipam\") pod \"ssh-known-hosts-edpm-deployment-fvg2t\" (UID: \"474538df-0433-4fb9-b2c2-ed291078d237\") " pod="openstack/ssh-known-hosts-edpm-deployment-fvg2t" Nov 22 11:09:23 crc kubenswrapper[4926]: I1122 11:09:23.662225 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xrzp8\" (UniqueName: \"kubernetes.io/projected/474538df-0433-4fb9-b2c2-ed291078d237-kube-api-access-xrzp8\") pod \"ssh-known-hosts-edpm-deployment-fvg2t\" (UID: \"474538df-0433-4fb9-b2c2-ed291078d237\") " pod="openstack/ssh-known-hosts-edpm-deployment-fvg2t" Nov 22 11:09:23 crc 
kubenswrapper[4926]: I1122 11:09:23.667081 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/474538df-0433-4fb9-b2c2-ed291078d237-inventory-0\") pod \"ssh-known-hosts-edpm-deployment-fvg2t\" (UID: \"474538df-0433-4fb9-b2c2-ed291078d237\") " pod="openstack/ssh-known-hosts-edpm-deployment-fvg2t" Nov 22 11:09:23 crc kubenswrapper[4926]: I1122 11:09:23.675854 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/474538df-0433-4fb9-b2c2-ed291078d237-ssh-key-openstack-edpm-ipam\") pod \"ssh-known-hosts-edpm-deployment-fvg2t\" (UID: \"474538df-0433-4fb9-b2c2-ed291078d237\") " pod="openstack/ssh-known-hosts-edpm-deployment-fvg2t" Nov 22 11:09:23 crc kubenswrapper[4926]: I1122 11:09:23.679352 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xrzp8\" (UniqueName: \"kubernetes.io/projected/474538df-0433-4fb9-b2c2-ed291078d237-kube-api-access-xrzp8\") pod \"ssh-known-hosts-edpm-deployment-fvg2t\" (UID: \"474538df-0433-4fb9-b2c2-ed291078d237\") " pod="openstack/ssh-known-hosts-edpm-deployment-fvg2t" Nov 22 11:09:23 crc kubenswrapper[4926]: I1122 11:09:23.728072 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ssh-known-hosts-edpm-deployment-fvg2t" Nov 22 11:09:24 crc kubenswrapper[4926]: I1122 11:09:24.247796 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ssh-known-hosts-edpm-deployment-fvg2t"] Nov 22 11:09:24 crc kubenswrapper[4926]: I1122 11:09:24.333513 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ssh-known-hosts-edpm-deployment-fvg2t" event={"ID":"474538df-0433-4fb9-b2c2-ed291078d237","Type":"ContainerStarted","Data":"8e2cd78f22616a7c5d614921b3b9a61fb3ccd167432aadbe539b521bf78a53bf"} Nov 22 11:09:25 crc kubenswrapper[4926]: I1122 11:09:25.344095 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ssh-known-hosts-edpm-deployment-fvg2t" event={"ID":"474538df-0433-4fb9-b2c2-ed291078d237","Type":"ContainerStarted","Data":"4f6bf977d816fb575a1ca47c7f58bfd5bc79069395dffacd36bfcb8b06f6c489"} Nov 22 11:09:25 crc kubenswrapper[4926]: I1122 11:09:25.363545 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ssh-known-hosts-edpm-deployment-fvg2t" podStartSLOduration=1.8592316580000001 podStartE2EDuration="2.363506409s" podCreationTimestamp="2025-11-22 11:09:23 +0000 UTC" firstStartedPulling="2025-11-22 11:09:24.247671715 +0000 UTC m=+1784.549277012" lastFinishedPulling="2025-11-22 11:09:24.751946466 +0000 UTC m=+1785.053551763" observedRunningTime="2025-11-22 11:09:25.358595848 +0000 UTC m=+1785.660201135" watchObservedRunningTime="2025-11-22 11:09:25.363506409 +0000 UTC m=+1785.665111696" Nov 22 11:09:32 crc kubenswrapper[4926]: I1122 11:09:32.422679 4926 generic.go:334] "Generic (PLEG): container finished" podID="474538df-0433-4fb9-b2c2-ed291078d237" containerID="4f6bf977d816fb575a1ca47c7f58bfd5bc79069395dffacd36bfcb8b06f6c489" exitCode=0 Nov 22 11:09:32 crc kubenswrapper[4926]: I1122 11:09:32.422757 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ssh-known-hosts-edpm-deployment-fvg2t" event={"ID":"474538df-0433-4fb9-b2c2-ed291078d237","Type":"ContainerDied","Data":"4f6bf977d816fb575a1ca47c7f58bfd5bc79069395dffacd36bfcb8b06f6c489"} Nov 22 11:09:33 crc kubenswrapper[4926]: I1122 11:09:33.854127 4926 util.go:48] "No ready sandbox for pod can be 
found. Need to start a new one" pod="openstack/ssh-known-hosts-edpm-deployment-fvg2t" Nov 22 11:09:33 crc kubenswrapper[4926]: I1122 11:09:33.983729 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/474538df-0433-4fb9-b2c2-ed291078d237-inventory-0\") pod \"474538df-0433-4fb9-b2c2-ed291078d237\" (UID: \"474538df-0433-4fb9-b2c2-ed291078d237\") " Nov 22 11:09:33 crc kubenswrapper[4926]: I1122 11:09:33.984022 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xrzp8\" (UniqueName: \"kubernetes.io/projected/474538df-0433-4fb9-b2c2-ed291078d237-kube-api-access-xrzp8\") pod \"474538df-0433-4fb9-b2c2-ed291078d237\" (UID: \"474538df-0433-4fb9-b2c2-ed291078d237\") " Nov 22 11:09:33 crc kubenswrapper[4926]: I1122 11:09:33.984060 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/474538df-0433-4fb9-b2c2-ed291078d237-ssh-key-openstack-edpm-ipam\") pod \"474538df-0433-4fb9-b2c2-ed291078d237\" (UID: \"474538df-0433-4fb9-b2c2-ed291078d237\") " Nov 22 11:09:33 crc kubenswrapper[4926]: I1122 11:09:33.990380 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/474538df-0433-4fb9-b2c2-ed291078d237-kube-api-access-xrzp8" (OuterVolumeSpecName: "kube-api-access-xrzp8") pod "474538df-0433-4fb9-b2c2-ed291078d237" (UID: "474538df-0433-4fb9-b2c2-ed291078d237"). InnerVolumeSpecName "kube-api-access-xrzp8". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 11:09:34 crc kubenswrapper[4926]: I1122 11:09:34.017044 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/474538df-0433-4fb9-b2c2-ed291078d237-inventory-0" (OuterVolumeSpecName: "inventory-0") pod "474538df-0433-4fb9-b2c2-ed291078d237" (UID: "474538df-0433-4fb9-b2c2-ed291078d237"). InnerVolumeSpecName "inventory-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 11:09:34 crc kubenswrapper[4926]: I1122 11:09:34.017100 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/474538df-0433-4fb9-b2c2-ed291078d237-ssh-key-openstack-edpm-ipam" (OuterVolumeSpecName: "ssh-key-openstack-edpm-ipam") pod "474538df-0433-4fb9-b2c2-ed291078d237" (UID: "474538df-0433-4fb9-b2c2-ed291078d237"). InnerVolumeSpecName "ssh-key-openstack-edpm-ipam". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 11:09:34 crc kubenswrapper[4926]: I1122 11:09:34.085946 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xrzp8\" (UniqueName: \"kubernetes.io/projected/474538df-0433-4fb9-b2c2-ed291078d237-kube-api-access-xrzp8\") on node \"crc\" DevicePath \"\"" Nov 22 11:09:34 crc kubenswrapper[4926]: I1122 11:09:34.085983 4926 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/474538df-0433-4fb9-b2c2-ed291078d237-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Nov 22 11:09:34 crc kubenswrapper[4926]: I1122 11:09:34.085993 4926 reconciler_common.go:293] "Volume detached for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/474538df-0433-4fb9-b2c2-ed291078d237-inventory-0\") on node \"crc\" DevicePath \"\"" Nov 22 11:09:34 crc kubenswrapper[4926]: I1122 11:09:34.451426 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ssh-known-hosts-edpm-deployment-fvg2t" event={"ID":"474538df-0433-4fb9-b2c2-ed291078d237","Type":"ContainerDied","Data":"8e2cd78f22616a7c5d614921b3b9a61fb3ccd167432aadbe539b521bf78a53bf"} Nov 22 11:09:34 crc kubenswrapper[4926]: I1122 11:09:34.451476 4926 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="8e2cd78f22616a7c5d614921b3b9a61fb3ccd167432aadbe539b521bf78a53bf" Nov 22 11:09:34 crc kubenswrapper[4926]: I1122 11:09:34.451536 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ssh-known-hosts-edpm-deployment-fvg2t" Nov 22 11:09:34 crc kubenswrapper[4926]: I1122 11:09:34.527260 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/run-os-edpm-deployment-openstack-edpm-ipam-td6g4"] Nov 22 11:09:34 crc kubenswrapper[4926]: E1122 11:09:34.527737 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="474538df-0433-4fb9-b2c2-ed291078d237" containerName="ssh-known-hosts-edpm-deployment" Nov 22 11:09:34 crc kubenswrapper[4926]: I1122 11:09:34.527761 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="474538df-0433-4fb9-b2c2-ed291078d237" containerName="ssh-known-hosts-edpm-deployment" Nov 22 11:09:34 crc kubenswrapper[4926]: I1122 11:09:34.528003 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="474538df-0433-4fb9-b2c2-ed291078d237" containerName="ssh-known-hosts-edpm-deployment" Nov 22 11:09:34 crc kubenswrapper[4926]: I1122 11:09:34.528752 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-td6g4" Nov 22 11:09:34 crc kubenswrapper[4926]: I1122 11:09:34.531485 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 22 11:09:34 crc kubenswrapper[4926]: I1122 11:09:34.532082 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-lscnm" Nov 22 11:09:34 crc kubenswrapper[4926]: I1122 11:09:34.532310 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 22 11:09:34 crc kubenswrapper[4926]: I1122 11:09:34.532334 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 22 11:09:34 crc kubenswrapper[4926]: I1122 11:09:34.539106 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/run-os-edpm-deployment-openstack-edpm-ipam-td6g4"] Nov 22 11:09:34 crc kubenswrapper[4926]: I1122 11:09:34.582162 4926 scope.go:117] "RemoveContainer" containerID="e38f3f8166b5990b08ef0202b8570c03dbfd957e5057988fdcd93fcb0d15ee5f" Nov 22 11:09:34 crc kubenswrapper[4926]: E1122 11:09:34.582707 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xr9nd_openshift-machine-config-operator(d4977b14-85c3-4141-9b15-1768f09e8d27)\"" pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" podUID="d4977b14-85c3-4141-9b15-1768f09e8d27" Nov 22 11:09:34 crc kubenswrapper[4926]: I1122 11:09:34.700781 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ca7c4fa4-7055-4d7a-9147-ae64dd195ae1-inventory\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-td6g4\" (UID: \"ca7c4fa4-7055-4d7a-9147-ae64dd195ae1\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-td6g4" Nov 22 11:09:34 crc kubenswrapper[4926]: I1122 11:09:34.700960 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/ca7c4fa4-7055-4d7a-9147-ae64dd195ae1-ssh-key\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-td6g4\" (UID: \"ca7c4fa4-7055-4d7a-9147-ae64dd195ae1\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-td6g4" Nov 22 11:09:34 crc kubenswrapper[4926]: I1122 11:09:34.701018 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9mnzr\" (UniqueName: \"kubernetes.io/projected/ca7c4fa4-7055-4d7a-9147-ae64dd195ae1-kube-api-access-9mnzr\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-td6g4\" (UID: \"ca7c4fa4-7055-4d7a-9147-ae64dd195ae1\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-td6g4" Nov 22 11:09:34 crc kubenswrapper[4926]: I1122 11:09:34.803231 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ca7c4fa4-7055-4d7a-9147-ae64dd195ae1-inventory\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-td6g4\" (UID: \"ca7c4fa4-7055-4d7a-9147-ae64dd195ae1\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-td6g4" Nov 22 11:09:34 crc kubenswrapper[4926]: I1122 11:09:34.803339 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"ssh-key\" (UniqueName: \"kubernetes.io/secret/ca7c4fa4-7055-4d7a-9147-ae64dd195ae1-ssh-key\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-td6g4\" (UID: \"ca7c4fa4-7055-4d7a-9147-ae64dd195ae1\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-td6g4" Nov 22 11:09:34 crc kubenswrapper[4926]: I1122 11:09:34.803364 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9mnzr\" (UniqueName: \"kubernetes.io/projected/ca7c4fa4-7055-4d7a-9147-ae64dd195ae1-kube-api-access-9mnzr\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-td6g4\" (UID: \"ca7c4fa4-7055-4d7a-9147-ae64dd195ae1\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-td6g4" Nov 22 11:09:34 crc kubenswrapper[4926]: I1122 11:09:34.809004 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/ca7c4fa4-7055-4d7a-9147-ae64dd195ae1-ssh-key\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-td6g4\" (UID: \"ca7c4fa4-7055-4d7a-9147-ae64dd195ae1\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-td6g4" Nov 22 11:09:34 crc kubenswrapper[4926]: I1122 11:09:34.816480 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ca7c4fa4-7055-4d7a-9147-ae64dd195ae1-inventory\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-td6g4\" (UID: \"ca7c4fa4-7055-4d7a-9147-ae64dd195ae1\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-td6g4" Nov 22 11:09:34 crc kubenswrapper[4926]: I1122 11:09:34.839558 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9mnzr\" (UniqueName: \"kubernetes.io/projected/ca7c4fa4-7055-4d7a-9147-ae64dd195ae1-kube-api-access-9mnzr\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-td6g4\" (UID: \"ca7c4fa4-7055-4d7a-9147-ae64dd195ae1\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-td6g4" Nov 22 11:09:34 crc kubenswrapper[4926]: I1122 11:09:34.851573 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-td6g4" Nov 22 11:09:35 crc kubenswrapper[4926]: I1122 11:09:35.386943 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/run-os-edpm-deployment-openstack-edpm-ipam-td6g4"] Nov 22 11:09:35 crc kubenswrapper[4926]: I1122 11:09:35.463331 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-td6g4" event={"ID":"ca7c4fa4-7055-4d7a-9147-ae64dd195ae1","Type":"ContainerStarted","Data":"4adad36a294da277027b9e4494792ab3e41539482fac6da258f056ef72d84a03"} Nov 22 11:09:36 crc kubenswrapper[4926]: I1122 11:09:36.475230 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-td6g4" event={"ID":"ca7c4fa4-7055-4d7a-9147-ae64dd195ae1","Type":"ContainerStarted","Data":"691da2d8c82767037f21decbab8829a56ef18ddb8b87f61e5fcd8dbc6fb6e984"} Nov 22 11:09:36 crc kubenswrapper[4926]: I1122 11:09:36.502242 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-td6g4" podStartSLOduration=2.036450493 podStartE2EDuration="2.502218717s" podCreationTimestamp="2025-11-22 11:09:34 +0000 UTC" firstStartedPulling="2025-11-22 11:09:35.400281272 +0000 UTC m=+1795.701886579" lastFinishedPulling="2025-11-22 11:09:35.866049506 +0000 UTC m=+1796.167654803" observedRunningTime="2025-11-22 11:09:36.494769363 +0000 UTC m=+1796.796374660" watchObservedRunningTime="2025-11-22 11:09:36.502218717 +0000 UTC m=+1796.803824014" Nov 22 11:09:44 crc kubenswrapper[4926]: I1122 11:09:44.547213 4926 generic.go:334] "Generic (PLEG): container finished" podID="ca7c4fa4-7055-4d7a-9147-ae64dd195ae1" containerID="691da2d8c82767037f21decbab8829a56ef18ddb8b87f61e5fcd8dbc6fb6e984" exitCode=0 Nov 22 11:09:44 crc kubenswrapper[4926]: I1122 11:09:44.547289 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-td6g4" event={"ID":"ca7c4fa4-7055-4d7a-9147-ae64dd195ae1","Type":"ContainerDied","Data":"691da2d8c82767037f21decbab8829a56ef18ddb8b87f61e5fcd8dbc6fb6e984"} Nov 22 11:09:46 crc kubenswrapper[4926]: I1122 11:09:46.002030 4926 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-td6g4" Nov 22 11:09:46 crc kubenswrapper[4926]: I1122 11:09:46.132655 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/ca7c4fa4-7055-4d7a-9147-ae64dd195ae1-ssh-key\") pod \"ca7c4fa4-7055-4d7a-9147-ae64dd195ae1\" (UID: \"ca7c4fa4-7055-4d7a-9147-ae64dd195ae1\") " Nov 22 11:09:46 crc kubenswrapper[4926]: I1122 11:09:46.133140 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ca7c4fa4-7055-4d7a-9147-ae64dd195ae1-inventory\") pod \"ca7c4fa4-7055-4d7a-9147-ae64dd195ae1\" (UID: \"ca7c4fa4-7055-4d7a-9147-ae64dd195ae1\") " Nov 22 11:09:46 crc kubenswrapper[4926]: I1122 11:09:46.133176 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9mnzr\" (UniqueName: \"kubernetes.io/projected/ca7c4fa4-7055-4d7a-9147-ae64dd195ae1-kube-api-access-9mnzr\") pod \"ca7c4fa4-7055-4d7a-9147-ae64dd195ae1\" (UID: \"ca7c4fa4-7055-4d7a-9147-ae64dd195ae1\") " Nov 22 11:09:46 crc kubenswrapper[4926]: I1122 11:09:46.139227 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ca7c4fa4-7055-4d7a-9147-ae64dd195ae1-kube-api-access-9mnzr" (OuterVolumeSpecName: "kube-api-access-9mnzr") pod "ca7c4fa4-7055-4d7a-9147-ae64dd195ae1" (UID: "ca7c4fa4-7055-4d7a-9147-ae64dd195ae1"). InnerVolumeSpecName "kube-api-access-9mnzr". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 11:09:46 crc kubenswrapper[4926]: I1122 11:09:46.167643 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ca7c4fa4-7055-4d7a-9147-ae64dd195ae1-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "ca7c4fa4-7055-4d7a-9147-ae64dd195ae1" (UID: "ca7c4fa4-7055-4d7a-9147-ae64dd195ae1"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 11:09:46 crc kubenswrapper[4926]: I1122 11:09:46.187349 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ca7c4fa4-7055-4d7a-9147-ae64dd195ae1-inventory" (OuterVolumeSpecName: "inventory") pod "ca7c4fa4-7055-4d7a-9147-ae64dd195ae1" (UID: "ca7c4fa4-7055-4d7a-9147-ae64dd195ae1"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 11:09:46 crc kubenswrapper[4926]: I1122 11:09:46.235832 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9mnzr\" (UniqueName: \"kubernetes.io/projected/ca7c4fa4-7055-4d7a-9147-ae64dd195ae1-kube-api-access-9mnzr\") on node \"crc\" DevicePath \"\"" Nov 22 11:09:46 crc kubenswrapper[4926]: I1122 11:09:46.235935 4926 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/ca7c4fa4-7055-4d7a-9147-ae64dd195ae1-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 22 11:09:46 crc kubenswrapper[4926]: I1122 11:09:46.235963 4926 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ca7c4fa4-7055-4d7a-9147-ae64dd195ae1-inventory\") on node \"crc\" DevicePath \"\"" Nov 22 11:09:46 crc kubenswrapper[4926]: I1122 11:09:46.571901 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-td6g4" event={"ID":"ca7c4fa4-7055-4d7a-9147-ae64dd195ae1","Type":"ContainerDied","Data":"4adad36a294da277027b9e4494792ab3e41539482fac6da258f056ef72d84a03"} Nov 22 11:09:46 crc kubenswrapper[4926]: I1122 11:09:46.571951 4926 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="4adad36a294da277027b9e4494792ab3e41539482fac6da258f056ef72d84a03" Nov 22 11:09:46 crc kubenswrapper[4926]: I1122 11:09:46.571952 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-td6g4" Nov 22 11:09:46 crc kubenswrapper[4926]: I1122 11:09:46.779768 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-hvcbq"] Nov 22 11:09:46 crc kubenswrapper[4926]: E1122 11:09:46.780291 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ca7c4fa4-7055-4d7a-9147-ae64dd195ae1" containerName="run-os-edpm-deployment-openstack-edpm-ipam" Nov 22 11:09:46 crc kubenswrapper[4926]: I1122 11:09:46.780315 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="ca7c4fa4-7055-4d7a-9147-ae64dd195ae1" containerName="run-os-edpm-deployment-openstack-edpm-ipam" Nov 22 11:09:46 crc kubenswrapper[4926]: I1122 11:09:46.780538 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="ca7c4fa4-7055-4d7a-9147-ae64dd195ae1" containerName="run-os-edpm-deployment-openstack-edpm-ipam" Nov 22 11:09:46 crc kubenswrapper[4926]: I1122 11:09:46.781297 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-hvcbq" Nov 22 11:09:46 crc kubenswrapper[4926]: I1122 11:09:46.787056 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 22 11:09:46 crc kubenswrapper[4926]: I1122 11:09:46.787434 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-lscnm" Nov 22 11:09:46 crc kubenswrapper[4926]: I1122 11:09:46.787443 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 22 11:09:46 crc kubenswrapper[4926]: I1122 11:09:46.787488 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 22 11:09:46 crc kubenswrapper[4926]: I1122 11:09:46.789687 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-hvcbq"] Nov 22 11:09:46 crc kubenswrapper[4926]: I1122 11:09:46.970653 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/07bbb761-300d-4592-9c67-27e85a79e770-ssh-key\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-hvcbq\" (UID: \"07bbb761-300d-4592-9c67-27e85a79e770\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-hvcbq" Nov 22 11:09:46 crc kubenswrapper[4926]: I1122 11:09:46.970819 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/07bbb761-300d-4592-9c67-27e85a79e770-inventory\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-hvcbq\" (UID: \"07bbb761-300d-4592-9c67-27e85a79e770\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-hvcbq" Nov 22 11:09:46 crc kubenswrapper[4926]: I1122 11:09:46.970961 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7c2nb\" (UniqueName: \"kubernetes.io/projected/07bbb761-300d-4592-9c67-27e85a79e770-kube-api-access-7c2nb\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-hvcbq\" (UID: \"07bbb761-300d-4592-9c67-27e85a79e770\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-hvcbq" Nov 22 11:09:47 crc kubenswrapper[4926]: I1122 11:09:47.072807 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/07bbb761-300d-4592-9c67-27e85a79e770-inventory\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-hvcbq\" (UID: \"07bbb761-300d-4592-9c67-27e85a79e770\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-hvcbq" Nov 22 11:09:47 crc kubenswrapper[4926]: I1122 11:09:47.072945 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7c2nb\" (UniqueName: \"kubernetes.io/projected/07bbb761-300d-4592-9c67-27e85a79e770-kube-api-access-7c2nb\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-hvcbq\" (UID: \"07bbb761-300d-4592-9c67-27e85a79e770\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-hvcbq" Nov 22 11:09:47 crc kubenswrapper[4926]: I1122 11:09:47.073077 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/07bbb761-300d-4592-9c67-27e85a79e770-ssh-key\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-hvcbq\" (UID: 
\"07bbb761-300d-4592-9c67-27e85a79e770\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-hvcbq" Nov 22 11:09:47 crc kubenswrapper[4926]: I1122 11:09:47.077643 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/07bbb761-300d-4592-9c67-27e85a79e770-ssh-key\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-hvcbq\" (UID: \"07bbb761-300d-4592-9c67-27e85a79e770\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-hvcbq" Nov 22 11:09:47 crc kubenswrapper[4926]: I1122 11:09:47.079675 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/07bbb761-300d-4592-9c67-27e85a79e770-inventory\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-hvcbq\" (UID: \"07bbb761-300d-4592-9c67-27e85a79e770\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-hvcbq" Nov 22 11:09:47 crc kubenswrapper[4926]: I1122 11:09:47.092320 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7c2nb\" (UniqueName: \"kubernetes.io/projected/07bbb761-300d-4592-9c67-27e85a79e770-kube-api-access-7c2nb\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-hvcbq\" (UID: \"07bbb761-300d-4592-9c67-27e85a79e770\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-hvcbq" Nov 22 11:09:47 crc kubenswrapper[4926]: I1122 11:09:47.103764 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-hvcbq" Nov 22 11:09:47 crc kubenswrapper[4926]: I1122 11:09:47.583078 4926 scope.go:117] "RemoveContainer" containerID="e38f3f8166b5990b08ef0202b8570c03dbfd957e5057988fdcd93fcb0d15ee5f" Nov 22 11:09:47 crc kubenswrapper[4926]: E1122 11:09:47.583708 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xr9nd_openshift-machine-config-operator(d4977b14-85c3-4141-9b15-1768f09e8d27)\"" pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" podUID="d4977b14-85c3-4141-9b15-1768f09e8d27" Nov 22 11:09:47 crc kubenswrapper[4926]: I1122 11:09:47.614509 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-hvcbq"] Nov 22 11:09:48 crc kubenswrapper[4926]: I1122 11:09:48.601831 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-hvcbq" event={"ID":"07bbb761-300d-4592-9c67-27e85a79e770","Type":"ContainerStarted","Data":"64671905c4d51797a8ac9f9905f87d4ebec4337ce3920ad2bceb6dce73d5af74"} Nov 22 11:09:48 crc kubenswrapper[4926]: I1122 11:09:48.602233 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-hvcbq" event={"ID":"07bbb761-300d-4592-9c67-27e85a79e770","Type":"ContainerStarted","Data":"9158be110695c7ce2428fd53ea5c2ba319c76f96c928f56a0fa0ff28083fc348"} Nov 22 11:09:48 crc kubenswrapper[4926]: I1122 11:09:48.621409 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-hvcbq" podStartSLOduration=2.197810747 podStartE2EDuration="2.621367018s" podCreationTimestamp="2025-11-22 11:09:46 +0000 UTC" firstStartedPulling="2025-11-22 11:09:47.61885653 +0000 UTC m=+1807.920461817" lastFinishedPulling="2025-11-22 
11:09:48.042412791 +0000 UTC m=+1808.344018088" observedRunningTime="2025-11-22 11:09:48.608957951 +0000 UTC m=+1808.910563248" watchObservedRunningTime="2025-11-22 11:09:48.621367018 +0000 UTC m=+1808.922972325" Nov 22 11:09:57 crc kubenswrapper[4926]: I1122 11:09:57.667370 4926 generic.go:334] "Generic (PLEG): container finished" podID="07bbb761-300d-4592-9c67-27e85a79e770" containerID="64671905c4d51797a8ac9f9905f87d4ebec4337ce3920ad2bceb6dce73d5af74" exitCode=0 Nov 22 11:09:57 crc kubenswrapper[4926]: I1122 11:09:57.667459 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-hvcbq" event={"ID":"07bbb761-300d-4592-9c67-27e85a79e770","Type":"ContainerDied","Data":"64671905c4d51797a8ac9f9905f87d4ebec4337ce3920ad2bceb6dce73d5af74"} Nov 22 11:09:58 crc kubenswrapper[4926]: I1122 11:09:58.582224 4926 scope.go:117] "RemoveContainer" containerID="e38f3f8166b5990b08ef0202b8570c03dbfd957e5057988fdcd93fcb0d15ee5f" Nov 22 11:09:58 crc kubenswrapper[4926]: E1122 11:09:58.582494 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xr9nd_openshift-machine-config-operator(d4977b14-85c3-4141-9b15-1768f09e8d27)\"" pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" podUID="d4977b14-85c3-4141-9b15-1768f09e8d27" Nov 22 11:09:59 crc kubenswrapper[4926]: I1122 11:09:59.040400 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-hvcbq" Nov 22 11:09:59 crc kubenswrapper[4926]: I1122 11:09:59.121520 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/07bbb761-300d-4592-9c67-27e85a79e770-inventory\") pod \"07bbb761-300d-4592-9c67-27e85a79e770\" (UID: \"07bbb761-300d-4592-9c67-27e85a79e770\") " Nov 22 11:09:59 crc kubenswrapper[4926]: I1122 11:09:59.121620 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/07bbb761-300d-4592-9c67-27e85a79e770-ssh-key\") pod \"07bbb761-300d-4592-9c67-27e85a79e770\" (UID: \"07bbb761-300d-4592-9c67-27e85a79e770\") " Nov 22 11:09:59 crc kubenswrapper[4926]: I1122 11:09:59.121695 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7c2nb\" (UniqueName: \"kubernetes.io/projected/07bbb761-300d-4592-9c67-27e85a79e770-kube-api-access-7c2nb\") pod \"07bbb761-300d-4592-9c67-27e85a79e770\" (UID: \"07bbb761-300d-4592-9c67-27e85a79e770\") " Nov 22 11:09:59 crc kubenswrapper[4926]: I1122 11:09:59.133159 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/07bbb761-300d-4592-9c67-27e85a79e770-kube-api-access-7c2nb" (OuterVolumeSpecName: "kube-api-access-7c2nb") pod "07bbb761-300d-4592-9c67-27e85a79e770" (UID: "07bbb761-300d-4592-9c67-27e85a79e770"). InnerVolumeSpecName "kube-api-access-7c2nb". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 11:09:59 crc kubenswrapper[4926]: I1122 11:09:59.159588 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/07bbb761-300d-4592-9c67-27e85a79e770-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "07bbb761-300d-4592-9c67-27e85a79e770" (UID: "07bbb761-300d-4592-9c67-27e85a79e770"). 
InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 11:09:59 crc kubenswrapper[4926]: I1122 11:09:59.167060 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/07bbb761-300d-4592-9c67-27e85a79e770-inventory" (OuterVolumeSpecName: "inventory") pod "07bbb761-300d-4592-9c67-27e85a79e770" (UID: "07bbb761-300d-4592-9c67-27e85a79e770"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 11:09:59 crc kubenswrapper[4926]: I1122 11:09:59.223483 4926 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/07bbb761-300d-4592-9c67-27e85a79e770-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 22 11:09:59 crc kubenswrapper[4926]: I1122 11:09:59.223536 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7c2nb\" (UniqueName: \"kubernetes.io/projected/07bbb761-300d-4592-9c67-27e85a79e770-kube-api-access-7c2nb\") on node \"crc\" DevicePath \"\"" Nov 22 11:09:59 crc kubenswrapper[4926]: I1122 11:09:59.223549 4926 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/07bbb761-300d-4592-9c67-27e85a79e770-inventory\") on node \"crc\" DevicePath \"\"" Nov 22 11:09:59 crc kubenswrapper[4926]: I1122 11:09:59.481686 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/install-certs-edpm-deployment-openstack-edpm-ipam-gqff6"] Nov 22 11:09:59 crc kubenswrapper[4926]: E1122 11:09:59.482275 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="07bbb761-300d-4592-9c67-27e85a79e770" containerName="reboot-os-edpm-deployment-openstack-edpm-ipam" Nov 22 11:09:59 crc kubenswrapper[4926]: I1122 11:09:59.482306 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="07bbb761-300d-4592-9c67-27e85a79e770" containerName="reboot-os-edpm-deployment-openstack-edpm-ipam" Nov 22 11:09:59 crc kubenswrapper[4926]: I1122 11:09:59.482635 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="07bbb761-300d-4592-9c67-27e85a79e770" containerName="reboot-os-edpm-deployment-openstack-edpm-ipam" Nov 22 11:09:59 crc kubenswrapper[4926]: I1122 11:09:59.483629 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-gqff6" Nov 22 11:09:59 crc kubenswrapper[4926]: I1122 11:09:59.487517 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-neutron-metadata-default-certs-0" Nov 22 11:09:59 crc kubenswrapper[4926]: I1122 11:09:59.488077 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-ovn-default-certs-0" Nov 22 11:09:59 crc kubenswrapper[4926]: I1122 11:09:59.488213 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-telemetry-default-certs-0" Nov 22 11:09:59 crc kubenswrapper[4926]: I1122 11:09:59.490962 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-libvirt-default-certs-0" Nov 22 11:09:59 crc kubenswrapper[4926]: I1122 11:09:59.500370 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/install-certs-edpm-deployment-openstack-edpm-ipam-gqff6"] Nov 22 11:09:59 crc kubenswrapper[4926]: I1122 11:09:59.528683 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cvpxf\" (UniqueName: \"kubernetes.io/projected/f456f1f9-7676-4426-810c-6057111ed942-kube-api-access-cvpxf\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-gqff6\" (UID: \"f456f1f9-7676-4426-810c-6057111ed942\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-gqff6" Nov 22 11:09:59 crc kubenswrapper[4926]: I1122 11:09:59.528723 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam-libvirt-default-certs-0\" (UniqueName: \"kubernetes.io/projected/f456f1f9-7676-4426-810c-6057111ed942-openstack-edpm-ipam-libvirt-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-gqff6\" (UID: \"f456f1f9-7676-4426-810c-6057111ed942\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-gqff6" Nov 22 11:09:59 crc kubenswrapper[4926]: I1122 11:09:59.528749 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/f456f1f9-7676-4426-810c-6057111ed942-ssh-key\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-gqff6\" (UID: \"f456f1f9-7676-4426-810c-6057111ed942\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-gqff6" Nov 22 11:09:59 crc kubenswrapper[4926]: I1122 11:09:59.528790 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f456f1f9-7676-4426-810c-6057111ed942-neutron-metadata-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-gqff6\" (UID: \"f456f1f9-7676-4426-810c-6057111ed942\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-gqff6" Nov 22 11:09:59 crc kubenswrapper[4926]: I1122 11:09:59.528854 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f456f1f9-7676-4426-810c-6057111ed942-ovn-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-gqff6\" (UID: \"f456f1f9-7676-4426-810c-6057111ed942\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-gqff6" Nov 22 11:09:59 crc kubenswrapper[4926]: I1122 11:09:59.528880 4926 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f456f1f9-7676-4426-810c-6057111ed942-nova-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-gqff6\" (UID: \"f456f1f9-7676-4426-810c-6057111ed942\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-gqff6" Nov 22 11:09:59 crc kubenswrapper[4926]: I1122 11:09:59.528930 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/f456f1f9-7676-4426-810c-6057111ed942-inventory\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-gqff6\" (UID: \"f456f1f9-7676-4426-810c-6057111ed942\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-gqff6" Nov 22 11:09:59 crc kubenswrapper[4926]: I1122 11:09:59.528953 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f456f1f9-7676-4426-810c-6057111ed942-repo-setup-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-gqff6\" (UID: \"f456f1f9-7676-4426-810c-6057111ed942\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-gqff6" Nov 22 11:09:59 crc kubenswrapper[4926]: I1122 11:09:59.529144 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam-neutron-metadata-default-certs-0\" (UniqueName: \"kubernetes.io/projected/f456f1f9-7676-4426-810c-6057111ed942-openstack-edpm-ipam-neutron-metadata-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-gqff6\" (UID: \"f456f1f9-7676-4426-810c-6057111ed942\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-gqff6" Nov 22 11:09:59 crc kubenswrapper[4926]: I1122 11:09:59.529215 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f456f1f9-7676-4426-810c-6057111ed942-telemetry-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-gqff6\" (UID: \"f456f1f9-7676-4426-810c-6057111ed942\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-gqff6" Nov 22 11:09:59 crc kubenswrapper[4926]: I1122 11:09:59.529316 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam-telemetry-default-certs-0\" (UniqueName: \"kubernetes.io/projected/f456f1f9-7676-4426-810c-6057111ed942-openstack-edpm-ipam-telemetry-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-gqff6\" (UID: \"f456f1f9-7676-4426-810c-6057111ed942\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-gqff6" Nov 22 11:09:59 crc kubenswrapper[4926]: I1122 11:09:59.529387 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f456f1f9-7676-4426-810c-6057111ed942-bootstrap-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-gqff6\" (UID: \"f456f1f9-7676-4426-810c-6057111ed942\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-gqff6" Nov 22 11:09:59 crc kubenswrapper[4926]: I1122 11:09:59.529420 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f456f1f9-7676-4426-810c-6057111ed942-libvirt-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-gqff6\" (UID: \"f456f1f9-7676-4426-810c-6057111ed942\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-gqff6" Nov 22 11:09:59 crc kubenswrapper[4926]: I1122 11:09:59.529454 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam-ovn-default-certs-0\" (UniqueName: \"kubernetes.io/projected/f456f1f9-7676-4426-810c-6057111ed942-openstack-edpm-ipam-ovn-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-gqff6\" (UID: \"f456f1f9-7676-4426-810c-6057111ed942\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-gqff6" Nov 22 11:09:59 crc kubenswrapper[4926]: I1122 11:09:59.630959 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/f456f1f9-7676-4426-810c-6057111ed942-inventory\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-gqff6\" (UID: \"f456f1f9-7676-4426-810c-6057111ed942\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-gqff6" Nov 22 11:09:59 crc kubenswrapper[4926]: I1122 11:09:59.631010 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f456f1f9-7676-4426-810c-6057111ed942-repo-setup-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-gqff6\" (UID: \"f456f1f9-7676-4426-810c-6057111ed942\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-gqff6" Nov 22 11:09:59 crc kubenswrapper[4926]: I1122 11:09:59.631069 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam-neutron-metadata-default-certs-0\" (UniqueName: \"kubernetes.io/projected/f456f1f9-7676-4426-810c-6057111ed942-openstack-edpm-ipam-neutron-metadata-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-gqff6\" (UID: \"f456f1f9-7676-4426-810c-6057111ed942\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-gqff6" Nov 22 11:09:59 crc kubenswrapper[4926]: I1122 11:09:59.631098 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f456f1f9-7676-4426-810c-6057111ed942-telemetry-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-gqff6\" (UID: \"f456f1f9-7676-4426-810c-6057111ed942\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-gqff6" Nov 22 11:09:59 crc kubenswrapper[4926]: I1122 11:09:59.631136 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam-telemetry-default-certs-0\" (UniqueName: \"kubernetes.io/projected/f456f1f9-7676-4426-810c-6057111ed942-openstack-edpm-ipam-telemetry-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-gqff6\" (UID: \"f456f1f9-7676-4426-810c-6057111ed942\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-gqff6" Nov 22 11:09:59 crc kubenswrapper[4926]: I1122 11:09:59.631158 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f456f1f9-7676-4426-810c-6057111ed942-bootstrap-combined-ca-bundle\") pod 
\"install-certs-edpm-deployment-openstack-edpm-ipam-gqff6\" (UID: \"f456f1f9-7676-4426-810c-6057111ed942\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-gqff6" Nov 22 11:09:59 crc kubenswrapper[4926]: I1122 11:09:59.631194 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f456f1f9-7676-4426-810c-6057111ed942-libvirt-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-gqff6\" (UID: \"f456f1f9-7676-4426-810c-6057111ed942\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-gqff6" Nov 22 11:09:59 crc kubenswrapper[4926]: I1122 11:09:59.631227 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam-ovn-default-certs-0\" (UniqueName: \"kubernetes.io/projected/f456f1f9-7676-4426-810c-6057111ed942-openstack-edpm-ipam-ovn-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-gqff6\" (UID: \"f456f1f9-7676-4426-810c-6057111ed942\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-gqff6" Nov 22 11:09:59 crc kubenswrapper[4926]: I1122 11:09:59.631274 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cvpxf\" (UniqueName: \"kubernetes.io/projected/f456f1f9-7676-4426-810c-6057111ed942-kube-api-access-cvpxf\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-gqff6\" (UID: \"f456f1f9-7676-4426-810c-6057111ed942\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-gqff6" Nov 22 11:09:59 crc kubenswrapper[4926]: I1122 11:09:59.631295 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam-libvirt-default-certs-0\" (UniqueName: \"kubernetes.io/projected/f456f1f9-7676-4426-810c-6057111ed942-openstack-edpm-ipam-libvirt-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-gqff6\" (UID: \"f456f1f9-7676-4426-810c-6057111ed942\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-gqff6" Nov 22 11:09:59 crc kubenswrapper[4926]: I1122 11:09:59.631315 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/f456f1f9-7676-4426-810c-6057111ed942-ssh-key\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-gqff6\" (UID: \"f456f1f9-7676-4426-810c-6057111ed942\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-gqff6" Nov 22 11:09:59 crc kubenswrapper[4926]: I1122 11:09:59.631383 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f456f1f9-7676-4426-810c-6057111ed942-neutron-metadata-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-gqff6\" (UID: \"f456f1f9-7676-4426-810c-6057111ed942\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-gqff6" Nov 22 11:09:59 crc kubenswrapper[4926]: I1122 11:09:59.631435 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f456f1f9-7676-4426-810c-6057111ed942-ovn-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-gqff6\" (UID: \"f456f1f9-7676-4426-810c-6057111ed942\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-gqff6" Nov 22 11:09:59 crc kubenswrapper[4926]: I1122 11:09:59.631462 4926 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f456f1f9-7676-4426-810c-6057111ed942-nova-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-gqff6\" (UID: \"f456f1f9-7676-4426-810c-6057111ed942\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-gqff6" Nov 22 11:09:59 crc kubenswrapper[4926]: I1122 11:09:59.635819 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/f456f1f9-7676-4426-810c-6057111ed942-ssh-key\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-gqff6\" (UID: \"f456f1f9-7676-4426-810c-6057111ed942\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-gqff6" Nov 22 11:09:59 crc kubenswrapper[4926]: I1122 11:09:59.636159 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam-libvirt-default-certs-0\" (UniqueName: \"kubernetes.io/projected/f456f1f9-7676-4426-810c-6057111ed942-openstack-edpm-ipam-libvirt-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-gqff6\" (UID: \"f456f1f9-7676-4426-810c-6057111ed942\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-gqff6" Nov 22 11:09:59 crc kubenswrapper[4926]: I1122 11:09:59.636447 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f456f1f9-7676-4426-810c-6057111ed942-telemetry-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-gqff6\" (UID: \"f456f1f9-7676-4426-810c-6057111ed942\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-gqff6" Nov 22 11:09:59 crc kubenswrapper[4926]: I1122 11:09:59.636558 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f456f1f9-7676-4426-810c-6057111ed942-neutron-metadata-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-gqff6\" (UID: \"f456f1f9-7676-4426-810c-6057111ed942\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-gqff6" Nov 22 11:09:59 crc kubenswrapper[4926]: I1122 11:09:59.636706 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f456f1f9-7676-4426-810c-6057111ed942-repo-setup-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-gqff6\" (UID: \"f456f1f9-7676-4426-810c-6057111ed942\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-gqff6" Nov 22 11:09:59 crc kubenswrapper[4926]: I1122 11:09:59.636930 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f456f1f9-7676-4426-810c-6057111ed942-bootstrap-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-gqff6\" (UID: \"f456f1f9-7676-4426-810c-6057111ed942\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-gqff6" Nov 22 11:09:59 crc kubenswrapper[4926]: I1122 11:09:59.636871 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam-ovn-default-certs-0\" (UniqueName: \"kubernetes.io/projected/f456f1f9-7676-4426-810c-6057111ed942-openstack-edpm-ipam-ovn-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-gqff6\" (UID: \"f456f1f9-7676-4426-810c-6057111ed942\") " 
pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-gqff6" Nov 22 11:09:59 crc kubenswrapper[4926]: I1122 11:09:59.638184 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f456f1f9-7676-4426-810c-6057111ed942-nova-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-gqff6\" (UID: \"f456f1f9-7676-4426-810c-6057111ed942\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-gqff6" Nov 22 11:09:59 crc kubenswrapper[4926]: I1122 11:09:59.638263 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/f456f1f9-7676-4426-810c-6057111ed942-inventory\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-gqff6\" (UID: \"f456f1f9-7676-4426-810c-6057111ed942\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-gqff6" Nov 22 11:09:59 crc kubenswrapper[4926]: I1122 11:09:59.638376 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f456f1f9-7676-4426-810c-6057111ed942-ovn-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-gqff6\" (UID: \"f456f1f9-7676-4426-810c-6057111ed942\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-gqff6" Nov 22 11:09:59 crc kubenswrapper[4926]: I1122 11:09:59.638790 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam-neutron-metadata-default-certs-0\" (UniqueName: \"kubernetes.io/projected/f456f1f9-7676-4426-810c-6057111ed942-openstack-edpm-ipam-neutron-metadata-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-gqff6\" (UID: \"f456f1f9-7676-4426-810c-6057111ed942\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-gqff6" Nov 22 11:09:59 crc kubenswrapper[4926]: I1122 11:09:59.642524 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f456f1f9-7676-4426-810c-6057111ed942-libvirt-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-gqff6\" (UID: \"f456f1f9-7676-4426-810c-6057111ed942\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-gqff6" Nov 22 11:09:59 crc kubenswrapper[4926]: I1122 11:09:59.642632 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam-telemetry-default-certs-0\" (UniqueName: \"kubernetes.io/projected/f456f1f9-7676-4426-810c-6057111ed942-openstack-edpm-ipam-telemetry-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-gqff6\" (UID: \"f456f1f9-7676-4426-810c-6057111ed942\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-gqff6" Nov 22 11:09:59 crc kubenswrapper[4926]: I1122 11:09:59.648963 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cvpxf\" (UniqueName: \"kubernetes.io/projected/f456f1f9-7676-4426-810c-6057111ed942-kube-api-access-cvpxf\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-gqff6\" (UID: \"f456f1f9-7676-4426-810c-6057111ed942\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-gqff6" Nov 22 11:09:59 crc kubenswrapper[4926]: I1122 11:09:59.705910 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-hvcbq" 
event={"ID":"07bbb761-300d-4592-9c67-27e85a79e770","Type":"ContainerDied","Data":"9158be110695c7ce2428fd53ea5c2ba319c76f96c928f56a0fa0ff28083fc348"} Nov 22 11:09:59 crc kubenswrapper[4926]: I1122 11:09:59.706401 4926 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="9158be110695c7ce2428fd53ea5c2ba319c76f96c928f56a0fa0ff28083fc348" Nov 22 11:09:59 crc kubenswrapper[4926]: I1122 11:09:59.705960 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-hvcbq" Nov 22 11:09:59 crc kubenswrapper[4926]: I1122 11:09:59.818648 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-gqff6" Nov 22 11:10:00 crc kubenswrapper[4926]: I1122 11:10:00.327318 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/install-certs-edpm-deployment-openstack-edpm-ipam-gqff6"] Nov 22 11:10:00 crc kubenswrapper[4926]: I1122 11:10:00.716775 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-gqff6" event={"ID":"f456f1f9-7676-4426-810c-6057111ed942","Type":"ContainerStarted","Data":"7323e426dbf327377b3e0d03f5ebc738c708d3224030b3f6e849db233ff1479c"} Nov 22 11:10:01 crc kubenswrapper[4926]: I1122 11:10:01.729394 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-gqff6" event={"ID":"f456f1f9-7676-4426-810c-6057111ed942","Type":"ContainerStarted","Data":"2a1f71d69bd25b65bef2e4397ab348389739cb20b11cf6b423672dc2675b0839"} Nov 22 11:10:01 crc kubenswrapper[4926]: I1122 11:10:01.753498 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-gqff6" podStartSLOduration=2.31983366 podStartE2EDuration="2.753478346s" podCreationTimestamp="2025-11-22 11:09:59 +0000 UTC" firstStartedPulling="2025-11-22 11:10:00.332414426 +0000 UTC m=+1820.634019713" lastFinishedPulling="2025-11-22 11:10:00.766059112 +0000 UTC m=+1821.067664399" observedRunningTime="2025-11-22 11:10:01.745601589 +0000 UTC m=+1822.047206886" watchObservedRunningTime="2025-11-22 11:10:01.753478346 +0000 UTC m=+1822.055083633" Nov 22 11:10:02 crc kubenswrapper[4926]: I1122 11:10:02.040335 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-cell-mapping-nkcgs"] Nov 22 11:10:02 crc kubenswrapper[4926]: I1122 11:10:02.050485 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-cell-mapping-nkcgs"] Nov 22 11:10:02 crc kubenswrapper[4926]: I1122 11:10:02.592685 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3d9f7681-83ae-40f4-b959-809724eb2498" path="/var/lib/kubelet/pods/3d9f7681-83ae-40f4-b959-809724eb2498/volumes" Nov 22 11:10:09 crc kubenswrapper[4926]: I1122 11:10:09.582877 4926 scope.go:117] "RemoveContainer" containerID="e38f3f8166b5990b08ef0202b8570c03dbfd957e5057988fdcd93fcb0d15ee5f" Nov 22 11:10:09 crc kubenswrapper[4926]: E1122 11:10:09.583714 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xr9nd_openshift-machine-config-operator(d4977b14-85c3-4141-9b15-1768f09e8d27)\"" pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" 
podUID="d4977b14-85c3-4141-9b15-1768f09e8d27" Nov 22 11:10:21 crc kubenswrapper[4926]: I1122 11:10:21.909928 4926 scope.go:117] "RemoveContainer" containerID="28fa4e89ea0a1b5044fbef33815c56031c037d853fe108da25c0e3a318c6d44b" Nov 22 11:10:23 crc kubenswrapper[4926]: I1122 11:10:23.582989 4926 scope.go:117] "RemoveContainer" containerID="e38f3f8166b5990b08ef0202b8570c03dbfd957e5057988fdcd93fcb0d15ee5f" Nov 22 11:10:23 crc kubenswrapper[4926]: E1122 11:10:23.583607 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xr9nd_openshift-machine-config-operator(d4977b14-85c3-4141-9b15-1768f09e8d27)\"" pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" podUID="d4977b14-85c3-4141-9b15-1768f09e8d27" Nov 22 11:10:35 crc kubenswrapper[4926]: I1122 11:10:35.581547 4926 scope.go:117] "RemoveContainer" containerID="e38f3f8166b5990b08ef0202b8570c03dbfd957e5057988fdcd93fcb0d15ee5f" Nov 22 11:10:35 crc kubenswrapper[4926]: E1122 11:10:35.582178 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xr9nd_openshift-machine-config-operator(d4977b14-85c3-4141-9b15-1768f09e8d27)\"" pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" podUID="d4977b14-85c3-4141-9b15-1768f09e8d27" Nov 22 11:10:39 crc kubenswrapper[4926]: I1122 11:10:39.059429 4926 generic.go:334] "Generic (PLEG): container finished" podID="f456f1f9-7676-4426-810c-6057111ed942" containerID="2a1f71d69bd25b65bef2e4397ab348389739cb20b11cf6b423672dc2675b0839" exitCode=0 Nov 22 11:10:39 crc kubenswrapper[4926]: I1122 11:10:39.059638 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-gqff6" event={"ID":"f456f1f9-7676-4426-810c-6057111ed942","Type":"ContainerDied","Data":"2a1f71d69bd25b65bef2e4397ab348389739cb20b11cf6b423672dc2675b0839"} Nov 22 11:10:40 crc kubenswrapper[4926]: I1122 11:10:40.460806 4926 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-gqff6" Nov 22 11:10:40 crc kubenswrapper[4926]: I1122 11:10:40.612258 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f456f1f9-7676-4426-810c-6057111ed942-repo-setup-combined-ca-bundle\") pod \"f456f1f9-7676-4426-810c-6057111ed942\" (UID: \"f456f1f9-7676-4426-810c-6057111ed942\") " Nov 22 11:10:40 crc kubenswrapper[4926]: I1122 11:10:40.612299 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f456f1f9-7676-4426-810c-6057111ed942-bootstrap-combined-ca-bundle\") pod \"f456f1f9-7676-4426-810c-6057111ed942\" (UID: \"f456f1f9-7676-4426-810c-6057111ed942\") " Nov 22 11:10:40 crc kubenswrapper[4926]: I1122 11:10:40.612341 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/f456f1f9-7676-4426-810c-6057111ed942-ssh-key\") pod \"f456f1f9-7676-4426-810c-6057111ed942\" (UID: \"f456f1f9-7676-4426-810c-6057111ed942\") " Nov 22 11:10:40 crc kubenswrapper[4926]: I1122 11:10:40.612378 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/f456f1f9-7676-4426-810c-6057111ed942-inventory\") pod \"f456f1f9-7676-4426-810c-6057111ed942\" (UID: \"f456f1f9-7676-4426-810c-6057111ed942\") " Nov 22 11:10:40 crc kubenswrapper[4926]: I1122 11:10:40.612408 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f456f1f9-7676-4426-810c-6057111ed942-telemetry-combined-ca-bundle\") pod \"f456f1f9-7676-4426-810c-6057111ed942\" (UID: \"f456f1f9-7676-4426-810c-6057111ed942\") " Nov 22 11:10:40 crc kubenswrapper[4926]: I1122 11:10:40.612451 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-edpm-ipam-neutron-metadata-default-certs-0\" (UniqueName: \"kubernetes.io/projected/f456f1f9-7676-4426-810c-6057111ed942-openstack-edpm-ipam-neutron-metadata-default-certs-0\") pod \"f456f1f9-7676-4426-810c-6057111ed942\" (UID: \"f456f1f9-7676-4426-810c-6057111ed942\") " Nov 22 11:10:40 crc kubenswrapper[4926]: I1122 11:10:40.612513 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-edpm-ipam-telemetry-default-certs-0\" (UniqueName: \"kubernetes.io/projected/f456f1f9-7676-4426-810c-6057111ed942-openstack-edpm-ipam-telemetry-default-certs-0\") pod \"f456f1f9-7676-4426-810c-6057111ed942\" (UID: \"f456f1f9-7676-4426-810c-6057111ed942\") " Nov 22 11:10:40 crc kubenswrapper[4926]: I1122 11:10:40.612534 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f456f1f9-7676-4426-810c-6057111ed942-nova-combined-ca-bundle\") pod \"f456f1f9-7676-4426-810c-6057111ed942\" (UID: \"f456f1f9-7676-4426-810c-6057111ed942\") " Nov 22 11:10:40 crc kubenswrapper[4926]: I1122 11:10:40.612576 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-edpm-ipam-libvirt-default-certs-0\" (UniqueName: \"kubernetes.io/projected/f456f1f9-7676-4426-810c-6057111ed942-openstack-edpm-ipam-libvirt-default-certs-0\") pod \"f456f1f9-7676-4426-810c-6057111ed942\" (UID: \"f456f1f9-7676-4426-810c-6057111ed942\") " Nov 22 
11:10:40 crc kubenswrapper[4926]: I1122 11:10:40.612617 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f456f1f9-7676-4426-810c-6057111ed942-ovn-combined-ca-bundle\") pod \"f456f1f9-7676-4426-810c-6057111ed942\" (UID: \"f456f1f9-7676-4426-810c-6057111ed942\") " Nov 22 11:10:40 crc kubenswrapper[4926]: I1122 11:10:40.612635 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f456f1f9-7676-4426-810c-6057111ed942-libvirt-combined-ca-bundle\") pod \"f456f1f9-7676-4426-810c-6057111ed942\" (UID: \"f456f1f9-7676-4426-810c-6057111ed942\") " Nov 22 11:10:40 crc kubenswrapper[4926]: I1122 11:10:40.612657 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cvpxf\" (UniqueName: \"kubernetes.io/projected/f456f1f9-7676-4426-810c-6057111ed942-kube-api-access-cvpxf\") pod \"f456f1f9-7676-4426-810c-6057111ed942\" (UID: \"f456f1f9-7676-4426-810c-6057111ed942\") " Nov 22 11:10:40 crc kubenswrapper[4926]: I1122 11:10:40.612723 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f456f1f9-7676-4426-810c-6057111ed942-neutron-metadata-combined-ca-bundle\") pod \"f456f1f9-7676-4426-810c-6057111ed942\" (UID: \"f456f1f9-7676-4426-810c-6057111ed942\") " Nov 22 11:10:40 crc kubenswrapper[4926]: I1122 11:10:40.612777 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-edpm-ipam-ovn-default-certs-0\" (UniqueName: \"kubernetes.io/projected/f456f1f9-7676-4426-810c-6057111ed942-openstack-edpm-ipam-ovn-default-certs-0\") pod \"f456f1f9-7676-4426-810c-6057111ed942\" (UID: \"f456f1f9-7676-4426-810c-6057111ed942\") " Nov 22 11:10:40 crc kubenswrapper[4926]: I1122 11:10:40.619131 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f456f1f9-7676-4426-810c-6057111ed942-repo-setup-combined-ca-bundle" (OuterVolumeSpecName: "repo-setup-combined-ca-bundle") pod "f456f1f9-7676-4426-810c-6057111ed942" (UID: "f456f1f9-7676-4426-810c-6057111ed942"). InnerVolumeSpecName "repo-setup-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 11:10:40 crc kubenswrapper[4926]: I1122 11:10:40.620955 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f456f1f9-7676-4426-810c-6057111ed942-openstack-edpm-ipam-ovn-default-certs-0" (OuterVolumeSpecName: "openstack-edpm-ipam-ovn-default-certs-0") pod "f456f1f9-7676-4426-810c-6057111ed942" (UID: "f456f1f9-7676-4426-810c-6057111ed942"). InnerVolumeSpecName "openstack-edpm-ipam-ovn-default-certs-0". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 11:10:40 crc kubenswrapper[4926]: I1122 11:10:40.621023 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f456f1f9-7676-4426-810c-6057111ed942-openstack-edpm-ipam-telemetry-default-certs-0" (OuterVolumeSpecName: "openstack-edpm-ipam-telemetry-default-certs-0") pod "f456f1f9-7676-4426-810c-6057111ed942" (UID: "f456f1f9-7676-4426-810c-6057111ed942"). InnerVolumeSpecName "openstack-edpm-ipam-telemetry-default-certs-0". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 11:10:40 crc kubenswrapper[4926]: I1122 11:10:40.621241 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f456f1f9-7676-4426-810c-6057111ed942-nova-combined-ca-bundle" (OuterVolumeSpecName: "nova-combined-ca-bundle") pod "f456f1f9-7676-4426-810c-6057111ed942" (UID: "f456f1f9-7676-4426-810c-6057111ed942"). InnerVolumeSpecName "nova-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 11:10:40 crc kubenswrapper[4926]: I1122 11:10:40.621445 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f456f1f9-7676-4426-810c-6057111ed942-ovn-combined-ca-bundle" (OuterVolumeSpecName: "ovn-combined-ca-bundle") pod "f456f1f9-7676-4426-810c-6057111ed942" (UID: "f456f1f9-7676-4426-810c-6057111ed942"). InnerVolumeSpecName "ovn-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 11:10:40 crc kubenswrapper[4926]: I1122 11:10:40.621899 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f456f1f9-7676-4426-810c-6057111ed942-bootstrap-combined-ca-bundle" (OuterVolumeSpecName: "bootstrap-combined-ca-bundle") pod "f456f1f9-7676-4426-810c-6057111ed942" (UID: "f456f1f9-7676-4426-810c-6057111ed942"). InnerVolumeSpecName "bootstrap-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 11:10:40 crc kubenswrapper[4926]: I1122 11:10:40.622806 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f456f1f9-7676-4426-810c-6057111ed942-openstack-edpm-ipam-libvirt-default-certs-0" (OuterVolumeSpecName: "openstack-edpm-ipam-libvirt-default-certs-0") pod "f456f1f9-7676-4426-810c-6057111ed942" (UID: "f456f1f9-7676-4426-810c-6057111ed942"). InnerVolumeSpecName "openstack-edpm-ipam-libvirt-default-certs-0". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 11:10:40 crc kubenswrapper[4926]: I1122 11:10:40.623382 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f456f1f9-7676-4426-810c-6057111ed942-openstack-edpm-ipam-neutron-metadata-default-certs-0" (OuterVolumeSpecName: "openstack-edpm-ipam-neutron-metadata-default-certs-0") pod "f456f1f9-7676-4426-810c-6057111ed942" (UID: "f456f1f9-7676-4426-810c-6057111ed942"). InnerVolumeSpecName "openstack-edpm-ipam-neutron-metadata-default-certs-0". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 11:10:40 crc kubenswrapper[4926]: I1122 11:10:40.624254 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f456f1f9-7676-4426-810c-6057111ed942-kube-api-access-cvpxf" (OuterVolumeSpecName: "kube-api-access-cvpxf") pod "f456f1f9-7676-4426-810c-6057111ed942" (UID: "f456f1f9-7676-4426-810c-6057111ed942"). InnerVolumeSpecName "kube-api-access-cvpxf". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 11:10:40 crc kubenswrapper[4926]: I1122 11:10:40.626553 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f456f1f9-7676-4426-810c-6057111ed942-telemetry-combined-ca-bundle" (OuterVolumeSpecName: "telemetry-combined-ca-bundle") pod "f456f1f9-7676-4426-810c-6057111ed942" (UID: "f456f1f9-7676-4426-810c-6057111ed942"). InnerVolumeSpecName "telemetry-combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 11:10:40 crc kubenswrapper[4926]: I1122 11:10:40.626693 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f456f1f9-7676-4426-810c-6057111ed942-neutron-metadata-combined-ca-bundle" (OuterVolumeSpecName: "neutron-metadata-combined-ca-bundle") pod "f456f1f9-7676-4426-810c-6057111ed942" (UID: "f456f1f9-7676-4426-810c-6057111ed942"). InnerVolumeSpecName "neutron-metadata-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 11:10:40 crc kubenswrapper[4926]: I1122 11:10:40.638667 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f456f1f9-7676-4426-810c-6057111ed942-libvirt-combined-ca-bundle" (OuterVolumeSpecName: "libvirt-combined-ca-bundle") pod "f456f1f9-7676-4426-810c-6057111ed942" (UID: "f456f1f9-7676-4426-810c-6057111ed942"). InnerVolumeSpecName "libvirt-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 11:10:40 crc kubenswrapper[4926]: I1122 11:10:40.644323 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f456f1f9-7676-4426-810c-6057111ed942-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "f456f1f9-7676-4426-810c-6057111ed942" (UID: "f456f1f9-7676-4426-810c-6057111ed942"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 11:10:40 crc kubenswrapper[4926]: I1122 11:10:40.647062 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f456f1f9-7676-4426-810c-6057111ed942-inventory" (OuterVolumeSpecName: "inventory") pod "f456f1f9-7676-4426-810c-6057111ed942" (UID: "f456f1f9-7676-4426-810c-6057111ed942"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 11:10:40 crc kubenswrapper[4926]: I1122 11:10:40.715281 4926 reconciler_common.go:293] "Volume detached for volume \"openstack-edpm-ipam-ovn-default-certs-0\" (UniqueName: \"kubernetes.io/projected/f456f1f9-7676-4426-810c-6057111ed942-openstack-edpm-ipam-ovn-default-certs-0\") on node \"crc\" DevicePath \"\"" Nov 22 11:10:40 crc kubenswrapper[4926]: I1122 11:10:40.715320 4926 reconciler_common.go:293] "Volume detached for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f456f1f9-7676-4426-810c-6057111ed942-repo-setup-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 22 11:10:40 crc kubenswrapper[4926]: I1122 11:10:40.715333 4926 reconciler_common.go:293] "Volume detached for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f456f1f9-7676-4426-810c-6057111ed942-bootstrap-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 22 11:10:40 crc kubenswrapper[4926]: I1122 11:10:40.715346 4926 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/f456f1f9-7676-4426-810c-6057111ed942-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 22 11:10:40 crc kubenswrapper[4926]: I1122 11:10:40.715357 4926 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/f456f1f9-7676-4426-810c-6057111ed942-inventory\") on node \"crc\" DevicePath \"\"" Nov 22 11:10:40 crc kubenswrapper[4926]: I1122 11:10:40.715367 4926 reconciler_common.go:293] "Volume detached for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f456f1f9-7676-4426-810c-6057111ed942-telemetry-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 22 11:10:40 crc kubenswrapper[4926]: I1122 11:10:40.715381 4926 reconciler_common.go:293] "Volume detached for volume \"openstack-edpm-ipam-neutron-metadata-default-certs-0\" (UniqueName: \"kubernetes.io/projected/f456f1f9-7676-4426-810c-6057111ed942-openstack-edpm-ipam-neutron-metadata-default-certs-0\") on node \"crc\" DevicePath \"\"" Nov 22 11:10:40 crc kubenswrapper[4926]: I1122 11:10:40.715391 4926 reconciler_common.go:293] "Volume detached for volume \"openstack-edpm-ipam-telemetry-default-certs-0\" (UniqueName: \"kubernetes.io/projected/f456f1f9-7676-4426-810c-6057111ed942-openstack-edpm-ipam-telemetry-default-certs-0\") on node \"crc\" DevicePath \"\"" Nov 22 11:10:40 crc kubenswrapper[4926]: I1122 11:10:40.715402 4926 reconciler_common.go:293] "Volume detached for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f456f1f9-7676-4426-810c-6057111ed942-nova-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 22 11:10:40 crc kubenswrapper[4926]: I1122 11:10:40.715412 4926 reconciler_common.go:293] "Volume detached for volume \"openstack-edpm-ipam-libvirt-default-certs-0\" (UniqueName: \"kubernetes.io/projected/f456f1f9-7676-4426-810c-6057111ed942-openstack-edpm-ipam-libvirt-default-certs-0\") on node \"crc\" DevicePath \"\"" Nov 22 11:10:40 crc kubenswrapper[4926]: I1122 11:10:40.715423 4926 reconciler_common.go:293] "Volume detached for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f456f1f9-7676-4426-810c-6057111ed942-ovn-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 22 11:10:40 crc kubenswrapper[4926]: I1122 11:10:40.715434 4926 reconciler_common.go:293] "Volume detached for volume \"libvirt-combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/f456f1f9-7676-4426-810c-6057111ed942-libvirt-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 22 11:10:40 crc kubenswrapper[4926]: I1122 11:10:40.715445 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cvpxf\" (UniqueName: \"kubernetes.io/projected/f456f1f9-7676-4426-810c-6057111ed942-kube-api-access-cvpxf\") on node \"crc\" DevicePath \"\"" Nov 22 11:10:40 crc kubenswrapper[4926]: I1122 11:10:40.715454 4926 reconciler_common.go:293] "Volume detached for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f456f1f9-7676-4426-810c-6057111ed942-neutron-metadata-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 22 11:10:41 crc kubenswrapper[4926]: I1122 11:10:41.097160 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-gqff6" event={"ID":"f456f1f9-7676-4426-810c-6057111ed942","Type":"ContainerDied","Data":"7323e426dbf327377b3e0d03f5ebc738c708d3224030b3f6e849db233ff1479c"} Nov 22 11:10:41 crc kubenswrapper[4926]: I1122 11:10:41.097192 4926 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="7323e426dbf327377b3e0d03f5ebc738c708d3224030b3f6e849db233ff1479c" Nov 22 11:10:41 crc kubenswrapper[4926]: I1122 11:10:41.097239 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-gqff6" Nov 22 11:10:41 crc kubenswrapper[4926]: I1122 11:10:41.232633 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-edpm-deployment-openstack-edpm-ipam-xgx5g"] Nov 22 11:10:41 crc kubenswrapper[4926]: E1122 11:10:41.233615 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f456f1f9-7676-4426-810c-6057111ed942" containerName="install-certs-edpm-deployment-openstack-edpm-ipam" Nov 22 11:10:41 crc kubenswrapper[4926]: I1122 11:10:41.233647 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="f456f1f9-7676-4426-810c-6057111ed942" containerName="install-certs-edpm-deployment-openstack-edpm-ipam" Nov 22 11:10:41 crc kubenswrapper[4926]: I1122 11:10:41.234007 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="f456f1f9-7676-4426-810c-6057111ed942" containerName="install-certs-edpm-deployment-openstack-edpm-ipam" Nov 22 11:10:41 crc kubenswrapper[4926]: I1122 11:10:41.235000 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-xgx5g" Nov 22 11:10:41 crc kubenswrapper[4926]: I1122 11:10:41.237062 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 22 11:10:41 crc kubenswrapper[4926]: I1122 11:10:41.241817 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 22 11:10:41 crc kubenswrapper[4926]: I1122 11:10:41.243214 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-config" Nov 22 11:10:41 crc kubenswrapper[4926]: I1122 11:10:41.244045 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 22 11:10:41 crc kubenswrapper[4926]: I1122 11:10:41.257110 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-edpm-deployment-openstack-edpm-ipam-xgx5g"] Nov 22 11:10:41 crc kubenswrapper[4926]: I1122 11:10:41.272275 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-lscnm" Nov 22 11:10:41 crc kubenswrapper[4926]: I1122 11:10:41.325936 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/060a3d68-c5b3-4788-8c63-ce0b6d67acc5-inventory\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-xgx5g\" (UID: \"060a3d68-c5b3-4788-8c63-ce0b6d67acc5\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-xgx5g" Nov 22 11:10:41 crc kubenswrapper[4926]: I1122 11:10:41.326240 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/060a3d68-c5b3-4788-8c63-ce0b6d67acc5-ovn-combined-ca-bundle\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-xgx5g\" (UID: \"060a3d68-c5b3-4788-8c63-ce0b6d67acc5\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-xgx5g" Nov 22 11:10:41 crc kubenswrapper[4926]: I1122 11:10:41.326472 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/060a3d68-c5b3-4788-8c63-ce0b6d67acc5-ovncontroller-config-0\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-xgx5g\" (UID: \"060a3d68-c5b3-4788-8c63-ce0b6d67acc5\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-xgx5g" Nov 22 11:10:41 crc kubenswrapper[4926]: I1122 11:10:41.326721 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cq565\" (UniqueName: \"kubernetes.io/projected/060a3d68-c5b3-4788-8c63-ce0b6d67acc5-kube-api-access-cq565\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-xgx5g\" (UID: \"060a3d68-c5b3-4788-8c63-ce0b6d67acc5\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-xgx5g" Nov 22 11:10:41 crc kubenswrapper[4926]: I1122 11:10:41.326810 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/060a3d68-c5b3-4788-8c63-ce0b6d67acc5-ssh-key\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-xgx5g\" (UID: \"060a3d68-c5b3-4788-8c63-ce0b6d67acc5\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-xgx5g" Nov 22 11:10:41 crc kubenswrapper[4926]: I1122 11:10:41.428847 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-combined-ca-bundle\" 
(UniqueName: \"kubernetes.io/secret/060a3d68-c5b3-4788-8c63-ce0b6d67acc5-ovn-combined-ca-bundle\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-xgx5g\" (UID: \"060a3d68-c5b3-4788-8c63-ce0b6d67acc5\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-xgx5g" Nov 22 11:10:41 crc kubenswrapper[4926]: I1122 11:10:41.428907 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/060a3d68-c5b3-4788-8c63-ce0b6d67acc5-ovncontroller-config-0\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-xgx5g\" (UID: \"060a3d68-c5b3-4788-8c63-ce0b6d67acc5\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-xgx5g" Nov 22 11:10:41 crc kubenswrapper[4926]: I1122 11:10:41.428970 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cq565\" (UniqueName: \"kubernetes.io/projected/060a3d68-c5b3-4788-8c63-ce0b6d67acc5-kube-api-access-cq565\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-xgx5g\" (UID: \"060a3d68-c5b3-4788-8c63-ce0b6d67acc5\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-xgx5g" Nov 22 11:10:41 crc kubenswrapper[4926]: I1122 11:10:41.429000 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/060a3d68-c5b3-4788-8c63-ce0b6d67acc5-ssh-key\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-xgx5g\" (UID: \"060a3d68-c5b3-4788-8c63-ce0b6d67acc5\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-xgx5g" Nov 22 11:10:41 crc kubenswrapper[4926]: I1122 11:10:41.429083 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/060a3d68-c5b3-4788-8c63-ce0b6d67acc5-inventory\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-xgx5g\" (UID: \"060a3d68-c5b3-4788-8c63-ce0b6d67acc5\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-xgx5g" Nov 22 11:10:41 crc kubenswrapper[4926]: I1122 11:10:41.429839 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/060a3d68-c5b3-4788-8c63-ce0b6d67acc5-ovncontroller-config-0\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-xgx5g\" (UID: \"060a3d68-c5b3-4788-8c63-ce0b6d67acc5\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-xgx5g" Nov 22 11:10:41 crc kubenswrapper[4926]: I1122 11:10:41.438998 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/060a3d68-c5b3-4788-8c63-ce0b6d67acc5-ssh-key\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-xgx5g\" (UID: \"060a3d68-c5b3-4788-8c63-ce0b6d67acc5\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-xgx5g" Nov 22 11:10:41 crc kubenswrapper[4926]: I1122 11:10:41.439012 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/060a3d68-c5b3-4788-8c63-ce0b6d67acc5-inventory\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-xgx5g\" (UID: \"060a3d68-c5b3-4788-8c63-ce0b6d67acc5\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-xgx5g" Nov 22 11:10:41 crc kubenswrapper[4926]: I1122 11:10:41.439636 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/060a3d68-c5b3-4788-8c63-ce0b6d67acc5-ovn-combined-ca-bundle\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-xgx5g\" (UID: \"060a3d68-c5b3-4788-8c63-ce0b6d67acc5\") " 
pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-xgx5g" Nov 22 11:10:41 crc kubenswrapper[4926]: I1122 11:10:41.446710 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cq565\" (UniqueName: \"kubernetes.io/projected/060a3d68-c5b3-4788-8c63-ce0b6d67acc5-kube-api-access-cq565\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-xgx5g\" (UID: \"060a3d68-c5b3-4788-8c63-ce0b6d67acc5\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-xgx5g" Nov 22 11:10:41 crc kubenswrapper[4926]: I1122 11:10:41.577813 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-lscnm" Nov 22 11:10:41 crc kubenswrapper[4926]: I1122 11:10:41.585974 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-xgx5g" Nov 22 11:10:42 crc kubenswrapper[4926]: I1122 11:10:42.085942 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-edpm-deployment-openstack-edpm-ipam-xgx5g"] Nov 22 11:10:42 crc kubenswrapper[4926]: I1122 11:10:42.105170 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-xgx5g" event={"ID":"060a3d68-c5b3-4788-8c63-ce0b6d67acc5","Type":"ContainerStarted","Data":"1b34a50996bddf6ca46e98cca4b38020905420679fa92c55f8b6f232bb7906d2"} Nov 22 11:10:42 crc kubenswrapper[4926]: I1122 11:10:42.609667 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 22 11:10:43 crc kubenswrapper[4926]: I1122 11:10:43.120778 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-xgx5g" event={"ID":"060a3d68-c5b3-4788-8c63-ce0b6d67acc5","Type":"ContainerStarted","Data":"2a0834f4165a00049b7b4a15d8ce670b0672e9e4117cb096fd032519abb9c210"} Nov 22 11:10:43 crc kubenswrapper[4926]: I1122 11:10:43.142027 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-xgx5g" podStartSLOduration=1.6262994659999999 podStartE2EDuration="2.142008086s" podCreationTimestamp="2025-11-22 11:10:41 +0000 UTC" firstStartedPulling="2025-11-22 11:10:42.089817617 +0000 UTC m=+1862.391422904" lastFinishedPulling="2025-11-22 11:10:42.605526237 +0000 UTC m=+1862.907131524" observedRunningTime="2025-11-22 11:10:43.133019877 +0000 UTC m=+1863.434625164" watchObservedRunningTime="2025-11-22 11:10:43.142008086 +0000 UTC m=+1863.443613373" Nov 22 11:10:48 crc kubenswrapper[4926]: I1122 11:10:48.582972 4926 scope.go:117] "RemoveContainer" containerID="e38f3f8166b5990b08ef0202b8570c03dbfd957e5057988fdcd93fcb0d15ee5f" Nov 22 11:10:48 crc kubenswrapper[4926]: E1122 11:10:48.583512 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xr9nd_openshift-machine-config-operator(d4977b14-85c3-4141-9b15-1768f09e8d27)\"" pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" podUID="d4977b14-85c3-4141-9b15-1768f09e8d27" Nov 22 11:11:03 crc kubenswrapper[4926]: I1122 11:11:03.582442 4926 scope.go:117] "RemoveContainer" containerID="e38f3f8166b5990b08ef0202b8570c03dbfd957e5057988fdcd93fcb0d15ee5f" Nov 22 11:11:03 crc kubenswrapper[4926]: E1122 11:11:03.583688 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for 
\"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xr9nd_openshift-machine-config-operator(d4977b14-85c3-4141-9b15-1768f09e8d27)\"" pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" podUID="d4977b14-85c3-4141-9b15-1768f09e8d27" Nov 22 11:11:14 crc kubenswrapper[4926]: I1122 11:11:14.583380 4926 scope.go:117] "RemoveContainer" containerID="e38f3f8166b5990b08ef0202b8570c03dbfd957e5057988fdcd93fcb0d15ee5f" Nov 22 11:11:14 crc kubenswrapper[4926]: E1122 11:11:14.584191 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xr9nd_openshift-machine-config-operator(d4977b14-85c3-4141-9b15-1768f09e8d27)\"" pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" podUID="d4977b14-85c3-4141-9b15-1768f09e8d27" Nov 22 11:11:28 crc kubenswrapper[4926]: I1122 11:11:28.582857 4926 scope.go:117] "RemoveContainer" containerID="e38f3f8166b5990b08ef0202b8570c03dbfd957e5057988fdcd93fcb0d15ee5f" Nov 22 11:11:28 crc kubenswrapper[4926]: E1122 11:11:28.583850 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xr9nd_openshift-machine-config-operator(d4977b14-85c3-4141-9b15-1768f09e8d27)\"" pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" podUID="d4977b14-85c3-4141-9b15-1768f09e8d27" Nov 22 11:11:41 crc kubenswrapper[4926]: I1122 11:11:41.626089 4926 scope.go:117] "RemoveContainer" containerID="e38f3f8166b5990b08ef0202b8570c03dbfd957e5057988fdcd93fcb0d15ee5f" Nov 22 11:11:42 crc kubenswrapper[4926]: I1122 11:11:42.733618 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" event={"ID":"d4977b14-85c3-4141-9b15-1768f09e8d27","Type":"ContainerStarted","Data":"d0b2e230e48b02d7f7eb51b7f5997a9106b1cc69fcc4aca4be72bfa73cbbac6b"} Nov 22 11:11:45 crc kubenswrapper[4926]: I1122 11:11:45.763169 4926 generic.go:334] "Generic (PLEG): container finished" podID="060a3d68-c5b3-4788-8c63-ce0b6d67acc5" containerID="2a0834f4165a00049b7b4a15d8ce670b0672e9e4117cb096fd032519abb9c210" exitCode=0 Nov 22 11:11:45 crc kubenswrapper[4926]: I1122 11:11:45.763226 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-xgx5g" event={"ID":"060a3d68-c5b3-4788-8c63-ce0b6d67acc5","Type":"ContainerDied","Data":"2a0834f4165a00049b7b4a15d8ce670b0672e9e4117cb096fd032519abb9c210"} Nov 22 11:11:47 crc kubenswrapper[4926]: I1122 11:11:47.193966 4926 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-xgx5g" Nov 22 11:11:47 crc kubenswrapper[4926]: I1122 11:11:47.332953 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/060a3d68-c5b3-4788-8c63-ce0b6d67acc5-ovn-combined-ca-bundle\") pod \"060a3d68-c5b3-4788-8c63-ce0b6d67acc5\" (UID: \"060a3d68-c5b3-4788-8c63-ce0b6d67acc5\") " Nov 22 11:11:47 crc kubenswrapper[4926]: I1122 11:11:47.333066 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cq565\" (UniqueName: \"kubernetes.io/projected/060a3d68-c5b3-4788-8c63-ce0b6d67acc5-kube-api-access-cq565\") pod \"060a3d68-c5b3-4788-8c63-ce0b6d67acc5\" (UID: \"060a3d68-c5b3-4788-8c63-ce0b6d67acc5\") " Nov 22 11:11:47 crc kubenswrapper[4926]: I1122 11:11:47.333097 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/060a3d68-c5b3-4788-8c63-ce0b6d67acc5-inventory\") pod \"060a3d68-c5b3-4788-8c63-ce0b6d67acc5\" (UID: \"060a3d68-c5b3-4788-8c63-ce0b6d67acc5\") " Nov 22 11:11:47 crc kubenswrapper[4926]: I1122 11:11:47.333143 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/060a3d68-c5b3-4788-8c63-ce0b6d67acc5-ssh-key\") pod \"060a3d68-c5b3-4788-8c63-ce0b6d67acc5\" (UID: \"060a3d68-c5b3-4788-8c63-ce0b6d67acc5\") " Nov 22 11:11:47 crc kubenswrapper[4926]: I1122 11:11:47.333243 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/060a3d68-c5b3-4788-8c63-ce0b6d67acc5-ovncontroller-config-0\") pod \"060a3d68-c5b3-4788-8c63-ce0b6d67acc5\" (UID: \"060a3d68-c5b3-4788-8c63-ce0b6d67acc5\") " Nov 22 11:11:47 crc kubenswrapper[4926]: I1122 11:11:47.339883 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/060a3d68-c5b3-4788-8c63-ce0b6d67acc5-kube-api-access-cq565" (OuterVolumeSpecName: "kube-api-access-cq565") pod "060a3d68-c5b3-4788-8c63-ce0b6d67acc5" (UID: "060a3d68-c5b3-4788-8c63-ce0b6d67acc5"). InnerVolumeSpecName "kube-api-access-cq565". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 11:11:47 crc kubenswrapper[4926]: I1122 11:11:47.340351 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/060a3d68-c5b3-4788-8c63-ce0b6d67acc5-ovn-combined-ca-bundle" (OuterVolumeSpecName: "ovn-combined-ca-bundle") pod "060a3d68-c5b3-4788-8c63-ce0b6d67acc5" (UID: "060a3d68-c5b3-4788-8c63-ce0b6d67acc5"). InnerVolumeSpecName "ovn-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 11:11:47 crc kubenswrapper[4926]: I1122 11:11:47.362309 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/060a3d68-c5b3-4788-8c63-ce0b6d67acc5-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "060a3d68-c5b3-4788-8c63-ce0b6d67acc5" (UID: "060a3d68-c5b3-4788-8c63-ce0b6d67acc5"). InnerVolumeSpecName "ssh-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 11:11:47 crc kubenswrapper[4926]: I1122 11:11:47.364957 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/060a3d68-c5b3-4788-8c63-ce0b6d67acc5-ovncontroller-config-0" (OuterVolumeSpecName: "ovncontroller-config-0") pod "060a3d68-c5b3-4788-8c63-ce0b6d67acc5" (UID: "060a3d68-c5b3-4788-8c63-ce0b6d67acc5"). InnerVolumeSpecName "ovncontroller-config-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 11:11:47 crc kubenswrapper[4926]: I1122 11:11:47.368138 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/060a3d68-c5b3-4788-8c63-ce0b6d67acc5-inventory" (OuterVolumeSpecName: "inventory") pod "060a3d68-c5b3-4788-8c63-ce0b6d67acc5" (UID: "060a3d68-c5b3-4788-8c63-ce0b6d67acc5"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 11:11:47 crc kubenswrapper[4926]: I1122 11:11:47.435566 4926 reconciler_common.go:293] "Volume detached for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/060a3d68-c5b3-4788-8c63-ce0b6d67acc5-ovncontroller-config-0\") on node \"crc\" DevicePath \"\"" Nov 22 11:11:47 crc kubenswrapper[4926]: I1122 11:11:47.435600 4926 reconciler_common.go:293] "Volume detached for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/060a3d68-c5b3-4788-8c63-ce0b6d67acc5-ovn-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 22 11:11:47 crc kubenswrapper[4926]: I1122 11:11:47.435610 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cq565\" (UniqueName: \"kubernetes.io/projected/060a3d68-c5b3-4788-8c63-ce0b6d67acc5-kube-api-access-cq565\") on node \"crc\" DevicePath \"\"" Nov 22 11:11:47 crc kubenswrapper[4926]: I1122 11:11:47.435619 4926 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/060a3d68-c5b3-4788-8c63-ce0b6d67acc5-inventory\") on node \"crc\" DevicePath \"\"" Nov 22 11:11:47 crc kubenswrapper[4926]: I1122 11:11:47.435627 4926 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/060a3d68-c5b3-4788-8c63-ce0b6d67acc5-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 22 11:11:47 crc kubenswrapper[4926]: I1122 11:11:47.790847 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-xgx5g" event={"ID":"060a3d68-c5b3-4788-8c63-ce0b6d67acc5","Type":"ContainerDied","Data":"1b34a50996bddf6ca46e98cca4b38020905420679fa92c55f8b6f232bb7906d2"} Nov 22 11:11:47 crc kubenswrapper[4926]: I1122 11:11:47.790903 4926 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="1b34a50996bddf6ca46e98cca4b38020905420679fa92c55f8b6f232bb7906d2" Nov 22 11:11:47 crc kubenswrapper[4926]: I1122 11:11:47.790962 4926 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-xgx5g" Nov 22 11:11:47 crc kubenswrapper[4926]: I1122 11:11:47.872113 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-f5tdc"] Nov 22 11:11:47 crc kubenswrapper[4926]: E1122 11:11:47.872560 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="060a3d68-c5b3-4788-8c63-ce0b6d67acc5" containerName="ovn-edpm-deployment-openstack-edpm-ipam" Nov 22 11:11:47 crc kubenswrapper[4926]: I1122 11:11:47.872580 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="060a3d68-c5b3-4788-8c63-ce0b6d67acc5" containerName="ovn-edpm-deployment-openstack-edpm-ipam" Nov 22 11:11:47 crc kubenswrapper[4926]: I1122 11:11:47.872823 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="060a3d68-c5b3-4788-8c63-ce0b6d67acc5" containerName="ovn-edpm-deployment-openstack-edpm-ipam" Nov 22 11:11:47 crc kubenswrapper[4926]: I1122 11:11:47.873563 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-f5tdc" Nov 22 11:11:47 crc kubenswrapper[4926]: I1122 11:11:47.876407 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-ovn-metadata-agent-neutron-config" Nov 22 11:11:47 crc kubenswrapper[4926]: I1122 11:11:47.876746 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 22 11:11:47 crc kubenswrapper[4926]: I1122 11:11:47.876875 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 22 11:11:47 crc kubenswrapper[4926]: I1122 11:11:47.876774 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-lscnm" Nov 22 11:11:47 crc kubenswrapper[4926]: I1122 11:11:47.876801 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-neutron-config" Nov 22 11:11:47 crc kubenswrapper[4926]: I1122 11:11:47.876831 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 22 11:11:47 crc kubenswrapper[4926]: I1122 11:11:47.882499 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-f5tdc"] Nov 22 11:11:47 crc kubenswrapper[4926]: I1122 11:11:47.942933 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/e65a3423-36b6-48c5-b170-989f64801105-inventory\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-f5tdc\" (UID: \"e65a3423-36b6-48c5-b170-989f64801105\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-f5tdc" Nov 22 11:11:47 crc kubenswrapper[4926]: I1122 11:11:47.943017 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ts99g\" (UniqueName: \"kubernetes.io/projected/e65a3423-36b6-48c5-b170-989f64801105-kube-api-access-ts99g\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-f5tdc\" (UID: \"e65a3423-36b6-48c5-b170-989f64801105\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-f5tdc" Nov 22 11:11:47 crc kubenswrapper[4926]: I1122 11:11:47.943148 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/e65a3423-36b6-48c5-b170-989f64801105-neutron-ovn-metadata-agent-neutron-config-0\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-f5tdc\" (UID: \"e65a3423-36b6-48c5-b170-989f64801105\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-f5tdc" Nov 22 11:11:47 crc kubenswrapper[4926]: I1122 11:11:47.943241 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/e65a3423-36b6-48c5-b170-989f64801105-nova-metadata-neutron-config-0\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-f5tdc\" (UID: \"e65a3423-36b6-48c5-b170-989f64801105\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-f5tdc" Nov 22 11:11:47 crc kubenswrapper[4926]: I1122 11:11:47.943290 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/e65a3423-36b6-48c5-b170-989f64801105-ssh-key\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-f5tdc\" (UID: \"e65a3423-36b6-48c5-b170-989f64801105\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-f5tdc" Nov 22 11:11:47 crc kubenswrapper[4926]: I1122 11:11:47.943512 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e65a3423-36b6-48c5-b170-989f64801105-neutron-metadata-combined-ca-bundle\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-f5tdc\" (UID: \"e65a3423-36b6-48c5-b170-989f64801105\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-f5tdc" Nov 22 11:11:48 crc kubenswrapper[4926]: E1122 11:11:48.014550 4926 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod060a3d68_c5b3_4788_8c63_ce0b6d67acc5.slice/crio-1b34a50996bddf6ca46e98cca4b38020905420679fa92c55f8b6f232bb7906d2\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod060a3d68_c5b3_4788_8c63_ce0b6d67acc5.slice\": RecentStats: unable to find data in memory cache]" Nov 22 11:11:48 crc kubenswrapper[4926]: I1122 11:11:48.045491 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ts99g\" (UniqueName: \"kubernetes.io/projected/e65a3423-36b6-48c5-b170-989f64801105-kube-api-access-ts99g\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-f5tdc\" (UID: \"e65a3423-36b6-48c5-b170-989f64801105\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-f5tdc" Nov 22 11:11:48 crc kubenswrapper[4926]: I1122 11:11:48.045580 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/e65a3423-36b6-48c5-b170-989f64801105-neutron-ovn-metadata-agent-neutron-config-0\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-f5tdc\" (UID: \"e65a3423-36b6-48c5-b170-989f64801105\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-f5tdc" Nov 22 11:11:48 crc kubenswrapper[4926]: I1122 11:11:48.045612 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-metadata-neutron-config-0\" (UniqueName: 
\"kubernetes.io/secret/e65a3423-36b6-48c5-b170-989f64801105-nova-metadata-neutron-config-0\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-f5tdc\" (UID: \"e65a3423-36b6-48c5-b170-989f64801105\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-f5tdc" Nov 22 11:11:48 crc kubenswrapper[4926]: I1122 11:11:48.045635 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/e65a3423-36b6-48c5-b170-989f64801105-ssh-key\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-f5tdc\" (UID: \"e65a3423-36b6-48c5-b170-989f64801105\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-f5tdc" Nov 22 11:11:48 crc kubenswrapper[4926]: I1122 11:11:48.045692 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e65a3423-36b6-48c5-b170-989f64801105-neutron-metadata-combined-ca-bundle\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-f5tdc\" (UID: \"e65a3423-36b6-48c5-b170-989f64801105\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-f5tdc" Nov 22 11:11:48 crc kubenswrapper[4926]: I1122 11:11:48.045754 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/e65a3423-36b6-48c5-b170-989f64801105-inventory\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-f5tdc\" (UID: \"e65a3423-36b6-48c5-b170-989f64801105\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-f5tdc" Nov 22 11:11:48 crc kubenswrapper[4926]: I1122 11:11:48.050733 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/e65a3423-36b6-48c5-b170-989f64801105-ssh-key\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-f5tdc\" (UID: \"e65a3423-36b6-48c5-b170-989f64801105\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-f5tdc" Nov 22 11:11:48 crc kubenswrapper[4926]: I1122 11:11:48.050969 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/e65a3423-36b6-48c5-b170-989f64801105-inventory\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-f5tdc\" (UID: \"e65a3423-36b6-48c5-b170-989f64801105\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-f5tdc" Nov 22 11:11:48 crc kubenswrapper[4926]: I1122 11:11:48.053571 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e65a3423-36b6-48c5-b170-989f64801105-neutron-metadata-combined-ca-bundle\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-f5tdc\" (UID: \"e65a3423-36b6-48c5-b170-989f64801105\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-f5tdc" Nov 22 11:11:48 crc kubenswrapper[4926]: I1122 11:11:48.054086 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/e65a3423-36b6-48c5-b170-989f64801105-nova-metadata-neutron-config-0\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-f5tdc\" (UID: \"e65a3423-36b6-48c5-b170-989f64801105\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-f5tdc" Nov 22 11:11:48 crc kubenswrapper[4926]: I1122 11:11:48.055006 4926 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/e65a3423-36b6-48c5-b170-989f64801105-neutron-ovn-metadata-agent-neutron-config-0\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-f5tdc\" (UID: \"e65a3423-36b6-48c5-b170-989f64801105\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-f5tdc" Nov 22 11:11:48 crc kubenswrapper[4926]: I1122 11:11:48.067270 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ts99g\" (UniqueName: \"kubernetes.io/projected/e65a3423-36b6-48c5-b170-989f64801105-kube-api-access-ts99g\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-f5tdc\" (UID: \"e65a3423-36b6-48c5-b170-989f64801105\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-f5tdc" Nov 22 11:11:48 crc kubenswrapper[4926]: I1122 11:11:48.194749 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-f5tdc" Nov 22 11:11:48 crc kubenswrapper[4926]: I1122 11:11:48.732261 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-f5tdc"] Nov 22 11:11:48 crc kubenswrapper[4926]: I1122 11:11:48.743306 4926 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 22 11:11:48 crc kubenswrapper[4926]: I1122 11:11:48.799479 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-f5tdc" event={"ID":"e65a3423-36b6-48c5-b170-989f64801105","Type":"ContainerStarted","Data":"c45d0788548f05d71850ba1241a7ad5bf055f5f79ed4dc92eaad172c2790f225"} Nov 22 11:11:49 crc kubenswrapper[4926]: I1122 11:11:49.810063 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-f5tdc" event={"ID":"e65a3423-36b6-48c5-b170-989f64801105","Type":"ContainerStarted","Data":"93f35258f5af4647259d9a220696a9c6bd92b8d87c115fe21f0da1fe0e468a37"} Nov 22 11:11:49 crc kubenswrapper[4926]: I1122 11:11:49.847732 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-f5tdc" podStartSLOduration=2.386395153 podStartE2EDuration="2.847708367s" podCreationTimestamp="2025-11-22 11:11:47 +0000 UTC" firstStartedPulling="2025-11-22 11:11:48.743068295 +0000 UTC m=+1929.044673572" lastFinishedPulling="2025-11-22 11:11:49.204381489 +0000 UTC m=+1929.505986786" observedRunningTime="2025-11-22 11:11:49.826779604 +0000 UTC m=+1930.128384911" watchObservedRunningTime="2025-11-22 11:11:49.847708367 +0000 UTC m=+1930.149313664" Nov 22 11:12:38 crc kubenswrapper[4926]: I1122 11:12:38.213786 4926 generic.go:334] "Generic (PLEG): container finished" podID="e65a3423-36b6-48c5-b170-989f64801105" containerID="93f35258f5af4647259d9a220696a9c6bd92b8d87c115fe21f0da1fe0e468a37" exitCode=0 Nov 22 11:12:38 crc kubenswrapper[4926]: I1122 11:12:38.213996 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-f5tdc" event={"ID":"e65a3423-36b6-48c5-b170-989f64801105","Type":"ContainerDied","Data":"93f35258f5af4647259d9a220696a9c6bd92b8d87c115fe21f0da1fe0e468a37"} Nov 22 11:12:39 crc kubenswrapper[4926]: I1122 11:12:39.662640 4926 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-f5tdc" Nov 22 11:12:39 crc kubenswrapper[4926]: I1122 11:12:39.759411 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ts99g\" (UniqueName: \"kubernetes.io/projected/e65a3423-36b6-48c5-b170-989f64801105-kube-api-access-ts99g\") pod \"e65a3423-36b6-48c5-b170-989f64801105\" (UID: \"e65a3423-36b6-48c5-b170-989f64801105\") " Nov 22 11:12:39 crc kubenswrapper[4926]: I1122 11:12:39.759487 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/e65a3423-36b6-48c5-b170-989f64801105-inventory\") pod \"e65a3423-36b6-48c5-b170-989f64801105\" (UID: \"e65a3423-36b6-48c5-b170-989f64801105\") " Nov 22 11:12:39 crc kubenswrapper[4926]: I1122 11:12:39.759517 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-metadata-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/e65a3423-36b6-48c5-b170-989f64801105-nova-metadata-neutron-config-0\") pod \"e65a3423-36b6-48c5-b170-989f64801105\" (UID: \"e65a3423-36b6-48c5-b170-989f64801105\") " Nov 22 11:12:39 crc kubenswrapper[4926]: I1122 11:12:39.759568 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/e65a3423-36b6-48c5-b170-989f64801105-neutron-ovn-metadata-agent-neutron-config-0\") pod \"e65a3423-36b6-48c5-b170-989f64801105\" (UID: \"e65a3423-36b6-48c5-b170-989f64801105\") " Nov 22 11:12:39 crc kubenswrapper[4926]: I1122 11:12:39.759619 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e65a3423-36b6-48c5-b170-989f64801105-neutron-metadata-combined-ca-bundle\") pod \"e65a3423-36b6-48c5-b170-989f64801105\" (UID: \"e65a3423-36b6-48c5-b170-989f64801105\") " Nov 22 11:12:39 crc kubenswrapper[4926]: I1122 11:12:39.759735 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/e65a3423-36b6-48c5-b170-989f64801105-ssh-key\") pod \"e65a3423-36b6-48c5-b170-989f64801105\" (UID: \"e65a3423-36b6-48c5-b170-989f64801105\") " Nov 22 11:12:39 crc kubenswrapper[4926]: I1122 11:12:39.765938 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e65a3423-36b6-48c5-b170-989f64801105-kube-api-access-ts99g" (OuterVolumeSpecName: "kube-api-access-ts99g") pod "e65a3423-36b6-48c5-b170-989f64801105" (UID: "e65a3423-36b6-48c5-b170-989f64801105"). InnerVolumeSpecName "kube-api-access-ts99g". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 11:12:39 crc kubenswrapper[4926]: I1122 11:12:39.779186 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e65a3423-36b6-48c5-b170-989f64801105-neutron-metadata-combined-ca-bundle" (OuterVolumeSpecName: "neutron-metadata-combined-ca-bundle") pod "e65a3423-36b6-48c5-b170-989f64801105" (UID: "e65a3423-36b6-48c5-b170-989f64801105"). InnerVolumeSpecName "neutron-metadata-combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 11:12:39 crc kubenswrapper[4926]: I1122 11:12:39.788013 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e65a3423-36b6-48c5-b170-989f64801105-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "e65a3423-36b6-48c5-b170-989f64801105" (UID: "e65a3423-36b6-48c5-b170-989f64801105"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 11:12:39 crc kubenswrapper[4926]: I1122 11:12:39.788874 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e65a3423-36b6-48c5-b170-989f64801105-inventory" (OuterVolumeSpecName: "inventory") pod "e65a3423-36b6-48c5-b170-989f64801105" (UID: "e65a3423-36b6-48c5-b170-989f64801105"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 11:12:39 crc kubenswrapper[4926]: I1122 11:12:39.789814 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e65a3423-36b6-48c5-b170-989f64801105-neutron-ovn-metadata-agent-neutron-config-0" (OuterVolumeSpecName: "neutron-ovn-metadata-agent-neutron-config-0") pod "e65a3423-36b6-48c5-b170-989f64801105" (UID: "e65a3423-36b6-48c5-b170-989f64801105"). InnerVolumeSpecName "neutron-ovn-metadata-agent-neutron-config-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 11:12:39 crc kubenswrapper[4926]: I1122 11:12:39.808330 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e65a3423-36b6-48c5-b170-989f64801105-nova-metadata-neutron-config-0" (OuterVolumeSpecName: "nova-metadata-neutron-config-0") pod "e65a3423-36b6-48c5-b170-989f64801105" (UID: "e65a3423-36b6-48c5-b170-989f64801105"). InnerVolumeSpecName "nova-metadata-neutron-config-0". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 11:12:39 crc kubenswrapper[4926]: I1122 11:12:39.863697 4926 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/e65a3423-36b6-48c5-b170-989f64801105-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 22 11:12:39 crc kubenswrapper[4926]: I1122 11:12:39.863950 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ts99g\" (UniqueName: \"kubernetes.io/projected/e65a3423-36b6-48c5-b170-989f64801105-kube-api-access-ts99g\") on node \"crc\" DevicePath \"\"" Nov 22 11:12:39 crc kubenswrapper[4926]: I1122 11:12:39.864047 4926 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/e65a3423-36b6-48c5-b170-989f64801105-inventory\") on node \"crc\" DevicePath \"\"" Nov 22 11:12:39 crc kubenswrapper[4926]: I1122 11:12:39.864145 4926 reconciler_common.go:293] "Volume detached for volume \"nova-metadata-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/e65a3423-36b6-48c5-b170-989f64801105-nova-metadata-neutron-config-0\") on node \"crc\" DevicePath \"\"" Nov 22 11:12:39 crc kubenswrapper[4926]: I1122 11:12:39.864230 4926 reconciler_common.go:293] "Volume detached for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/e65a3423-36b6-48c5-b170-989f64801105-neutron-ovn-metadata-agent-neutron-config-0\") on node \"crc\" DevicePath \"\"" Nov 22 11:12:39 crc kubenswrapper[4926]: I1122 11:12:39.864324 4926 reconciler_common.go:293] "Volume detached for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e65a3423-36b6-48c5-b170-989f64801105-neutron-metadata-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 22 11:12:40 crc kubenswrapper[4926]: I1122 11:12:40.229583 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-f5tdc" event={"ID":"e65a3423-36b6-48c5-b170-989f64801105","Type":"ContainerDied","Data":"c45d0788548f05d71850ba1241a7ad5bf055f5f79ed4dc92eaad172c2790f225"} Nov 22 11:12:40 crc kubenswrapper[4926]: I1122 11:12:40.229822 4926 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c45d0788548f05d71850ba1241a7ad5bf055f5f79ed4dc92eaad172c2790f225" Nov 22 11:12:40 crc kubenswrapper[4926]: I1122 11:12:40.229623 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-f5tdc" Nov 22 11:12:40 crc kubenswrapper[4926]: I1122 11:12:40.334054 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/libvirt-edpm-deployment-openstack-edpm-ipam-s4zsl"] Nov 22 11:12:40 crc kubenswrapper[4926]: E1122 11:12:40.334716 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e65a3423-36b6-48c5-b170-989f64801105" containerName="neutron-metadata-edpm-deployment-openstack-edpm-ipam" Nov 22 11:12:40 crc kubenswrapper[4926]: I1122 11:12:40.334804 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="e65a3423-36b6-48c5-b170-989f64801105" containerName="neutron-metadata-edpm-deployment-openstack-edpm-ipam" Nov 22 11:12:40 crc kubenswrapper[4926]: I1122 11:12:40.335168 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="e65a3423-36b6-48c5-b170-989f64801105" containerName="neutron-metadata-edpm-deployment-openstack-edpm-ipam" Nov 22 11:12:40 crc kubenswrapper[4926]: I1122 11:12:40.336052 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-s4zsl" Nov 22 11:12:40 crc kubenswrapper[4926]: I1122 11:12:40.338212 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-lscnm" Nov 22 11:12:40 crc kubenswrapper[4926]: I1122 11:12:40.339937 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"libvirt-secret" Nov 22 11:12:40 crc kubenswrapper[4926]: I1122 11:12:40.339985 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 22 11:12:40 crc kubenswrapper[4926]: I1122 11:12:40.340271 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 22 11:12:40 crc kubenswrapper[4926]: I1122 11:12:40.342193 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 22 11:12:40 crc kubenswrapper[4926]: I1122 11:12:40.349571 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/libvirt-edpm-deployment-openstack-edpm-ipam-s4zsl"] Nov 22 11:12:40 crc kubenswrapper[4926]: I1122 11:12:40.475459 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/64e25bf4-8746-413f-a28b-264ddfb9feff-libvirt-secret-0\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-s4zsl\" (UID: \"64e25bf4-8746-413f-a28b-264ddfb9feff\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-s4zsl" Nov 22 11:12:40 crc kubenswrapper[4926]: I1122 11:12:40.475810 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/64e25bf4-8746-413f-a28b-264ddfb9feff-libvirt-combined-ca-bundle\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-s4zsl\" (UID: \"64e25bf4-8746-413f-a28b-264ddfb9feff\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-s4zsl" Nov 22 11:12:40 crc kubenswrapper[4926]: I1122 11:12:40.475858 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-699ct\" (UniqueName: \"kubernetes.io/projected/64e25bf4-8746-413f-a28b-264ddfb9feff-kube-api-access-699ct\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-s4zsl\" (UID: \"64e25bf4-8746-413f-a28b-264ddfb9feff\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-s4zsl" Nov 22 11:12:40 crc kubenswrapper[4926]: I1122 11:12:40.475900 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/64e25bf4-8746-413f-a28b-264ddfb9feff-ssh-key\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-s4zsl\" (UID: \"64e25bf4-8746-413f-a28b-264ddfb9feff\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-s4zsl" Nov 22 11:12:40 crc kubenswrapper[4926]: I1122 11:12:40.475962 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/64e25bf4-8746-413f-a28b-264ddfb9feff-inventory\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-s4zsl\" (UID: \"64e25bf4-8746-413f-a28b-264ddfb9feff\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-s4zsl" Nov 22 11:12:40 crc kubenswrapper[4926]: I1122 11:12:40.577456 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"ssh-key\" (UniqueName: \"kubernetes.io/secret/64e25bf4-8746-413f-a28b-264ddfb9feff-ssh-key\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-s4zsl\" (UID: \"64e25bf4-8746-413f-a28b-264ddfb9feff\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-s4zsl" Nov 22 11:12:40 crc kubenswrapper[4926]: I1122 11:12:40.577545 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/64e25bf4-8746-413f-a28b-264ddfb9feff-inventory\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-s4zsl\" (UID: \"64e25bf4-8746-413f-a28b-264ddfb9feff\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-s4zsl" Nov 22 11:12:40 crc kubenswrapper[4926]: I1122 11:12:40.577596 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/64e25bf4-8746-413f-a28b-264ddfb9feff-libvirt-secret-0\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-s4zsl\" (UID: \"64e25bf4-8746-413f-a28b-264ddfb9feff\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-s4zsl" Nov 22 11:12:40 crc kubenswrapper[4926]: I1122 11:12:40.577647 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/64e25bf4-8746-413f-a28b-264ddfb9feff-libvirt-combined-ca-bundle\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-s4zsl\" (UID: \"64e25bf4-8746-413f-a28b-264ddfb9feff\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-s4zsl" Nov 22 11:12:40 crc kubenswrapper[4926]: I1122 11:12:40.577692 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-699ct\" (UniqueName: \"kubernetes.io/projected/64e25bf4-8746-413f-a28b-264ddfb9feff-kube-api-access-699ct\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-s4zsl\" (UID: \"64e25bf4-8746-413f-a28b-264ddfb9feff\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-s4zsl" Nov 22 11:12:40 crc kubenswrapper[4926]: I1122 11:12:40.582722 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/64e25bf4-8746-413f-a28b-264ddfb9feff-ssh-key\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-s4zsl\" (UID: \"64e25bf4-8746-413f-a28b-264ddfb9feff\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-s4zsl" Nov 22 11:12:40 crc kubenswrapper[4926]: I1122 11:12:40.582941 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/64e25bf4-8746-413f-a28b-264ddfb9feff-inventory\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-s4zsl\" (UID: \"64e25bf4-8746-413f-a28b-264ddfb9feff\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-s4zsl" Nov 22 11:12:40 crc kubenswrapper[4926]: I1122 11:12:40.583683 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/64e25bf4-8746-413f-a28b-264ddfb9feff-libvirt-secret-0\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-s4zsl\" (UID: \"64e25bf4-8746-413f-a28b-264ddfb9feff\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-s4zsl" Nov 22 11:12:40 crc kubenswrapper[4926]: I1122 11:12:40.585244 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/64e25bf4-8746-413f-a28b-264ddfb9feff-libvirt-combined-ca-bundle\") pod 
\"libvirt-edpm-deployment-openstack-edpm-ipam-s4zsl\" (UID: \"64e25bf4-8746-413f-a28b-264ddfb9feff\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-s4zsl" Nov 22 11:12:40 crc kubenswrapper[4926]: I1122 11:12:40.604645 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-699ct\" (UniqueName: \"kubernetes.io/projected/64e25bf4-8746-413f-a28b-264ddfb9feff-kube-api-access-699ct\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-s4zsl\" (UID: \"64e25bf4-8746-413f-a28b-264ddfb9feff\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-s4zsl" Nov 22 11:12:40 crc kubenswrapper[4926]: I1122 11:12:40.653446 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-s4zsl" Nov 22 11:12:41 crc kubenswrapper[4926]: I1122 11:12:41.234530 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/libvirt-edpm-deployment-openstack-edpm-ipam-s4zsl"] Nov 22 11:12:41 crc kubenswrapper[4926]: I1122 11:12:41.737346 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 22 11:12:42 crc kubenswrapper[4926]: I1122 11:12:42.246838 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-s4zsl" event={"ID":"64e25bf4-8746-413f-a28b-264ddfb9feff","Type":"ContainerStarted","Data":"d8c40057d9358b2dacb270e23f66f4e7afba1f457dd107025069df86f57b5ef3"} Nov 22 11:12:42 crc kubenswrapper[4926]: I1122 11:12:42.247215 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-s4zsl" event={"ID":"64e25bf4-8746-413f-a28b-264ddfb9feff","Type":"ContainerStarted","Data":"53364ac6f03769d520382c20c457f78d697523d2cd9d19045b571662fae8021a"} Nov 22 11:12:42 crc kubenswrapper[4926]: I1122 11:12:42.272203 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-s4zsl" podStartSLOduration=1.777373648 podStartE2EDuration="2.272184134s" podCreationTimestamp="2025-11-22 11:12:40 +0000 UTC" firstStartedPulling="2025-11-22 11:12:41.238634626 +0000 UTC m=+1981.540239923" lastFinishedPulling="2025-11-22 11:12:41.733445092 +0000 UTC m=+1982.035050409" observedRunningTime="2025-11-22 11:12:42.265215775 +0000 UTC m=+1982.566821072" watchObservedRunningTime="2025-11-22 11:12:42.272184134 +0000 UTC m=+1982.573789421" Nov 22 11:14:09 crc kubenswrapper[4926]: I1122 11:14:09.661395 4926 patch_prober.go:28] interesting pod/machine-config-daemon-xr9nd container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 22 11:14:09 crc kubenswrapper[4926]: I1122 11:14:09.663369 4926 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" podUID="d4977b14-85c3-4141-9b15-1768f09e8d27" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 22 11:14:39 crc kubenswrapper[4926]: I1122 11:14:39.661556 4926 patch_prober.go:28] interesting pod/machine-config-daemon-xr9nd container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: 
connection refused" start-of-body= Nov 22 11:14:39 crc kubenswrapper[4926]: I1122 11:14:39.662200 4926 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" podUID="d4977b14-85c3-4141-9b15-1768f09e8d27" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 22 11:14:41 crc kubenswrapper[4926]: I1122 11:14:41.543126 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-jk9mp"] Nov 22 11:14:41 crc kubenswrapper[4926]: I1122 11:14:41.548502 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-jk9mp" Nov 22 11:14:41 crc kubenswrapper[4926]: I1122 11:14:41.570757 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-jk9mp"] Nov 22 11:14:41 crc kubenswrapper[4926]: I1122 11:14:41.571328 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m9zxb\" (UniqueName: \"kubernetes.io/projected/3cbb2643-d578-4b40-98d8-c06e2cee809a-kube-api-access-m9zxb\") pod \"certified-operators-jk9mp\" (UID: \"3cbb2643-d578-4b40-98d8-c06e2cee809a\") " pod="openshift-marketplace/certified-operators-jk9mp" Nov 22 11:14:41 crc kubenswrapper[4926]: I1122 11:14:41.571417 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3cbb2643-d578-4b40-98d8-c06e2cee809a-utilities\") pod \"certified-operators-jk9mp\" (UID: \"3cbb2643-d578-4b40-98d8-c06e2cee809a\") " pod="openshift-marketplace/certified-operators-jk9mp" Nov 22 11:14:41 crc kubenswrapper[4926]: I1122 11:14:41.571513 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3cbb2643-d578-4b40-98d8-c06e2cee809a-catalog-content\") pod \"certified-operators-jk9mp\" (UID: \"3cbb2643-d578-4b40-98d8-c06e2cee809a\") " pod="openshift-marketplace/certified-operators-jk9mp" Nov 22 11:14:41 crc kubenswrapper[4926]: I1122 11:14:41.673682 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-m9zxb\" (UniqueName: \"kubernetes.io/projected/3cbb2643-d578-4b40-98d8-c06e2cee809a-kube-api-access-m9zxb\") pod \"certified-operators-jk9mp\" (UID: \"3cbb2643-d578-4b40-98d8-c06e2cee809a\") " pod="openshift-marketplace/certified-operators-jk9mp" Nov 22 11:14:41 crc kubenswrapper[4926]: I1122 11:14:41.673755 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3cbb2643-d578-4b40-98d8-c06e2cee809a-utilities\") pod \"certified-operators-jk9mp\" (UID: \"3cbb2643-d578-4b40-98d8-c06e2cee809a\") " pod="openshift-marketplace/certified-operators-jk9mp" Nov 22 11:14:41 crc kubenswrapper[4926]: I1122 11:14:41.673807 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3cbb2643-d578-4b40-98d8-c06e2cee809a-catalog-content\") pod \"certified-operators-jk9mp\" (UID: \"3cbb2643-d578-4b40-98d8-c06e2cee809a\") " pod="openshift-marketplace/certified-operators-jk9mp" Nov 22 11:14:41 crc kubenswrapper[4926]: I1122 11:14:41.674281 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3cbb2643-d578-4b40-98d8-c06e2cee809a-catalog-content\") pod \"certified-operators-jk9mp\" (UID: \"3cbb2643-d578-4b40-98d8-c06e2cee809a\") " pod="openshift-marketplace/certified-operators-jk9mp" Nov 22 11:14:41 crc kubenswrapper[4926]: I1122 11:14:41.674314 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3cbb2643-d578-4b40-98d8-c06e2cee809a-utilities\") pod \"certified-operators-jk9mp\" (UID: \"3cbb2643-d578-4b40-98d8-c06e2cee809a\") " pod="openshift-marketplace/certified-operators-jk9mp" Nov 22 11:14:41 crc kubenswrapper[4926]: I1122 11:14:41.692642 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-m9zxb\" (UniqueName: \"kubernetes.io/projected/3cbb2643-d578-4b40-98d8-c06e2cee809a-kube-api-access-m9zxb\") pod \"certified-operators-jk9mp\" (UID: \"3cbb2643-d578-4b40-98d8-c06e2cee809a\") " pod="openshift-marketplace/certified-operators-jk9mp" Nov 22 11:14:41 crc kubenswrapper[4926]: I1122 11:14:41.893774 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-jk9mp" Nov 22 11:14:42 crc kubenswrapper[4926]: I1122 11:14:42.384695 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-jk9mp"] Nov 22 11:14:43 crc kubenswrapper[4926]: I1122 11:14:43.417306 4926 generic.go:334] "Generic (PLEG): container finished" podID="3cbb2643-d578-4b40-98d8-c06e2cee809a" containerID="cb999b3aa166fd61b2b3d4ccb7e553a5cab4820df23545acb17833547198537d" exitCode=0 Nov 22 11:14:43 crc kubenswrapper[4926]: I1122 11:14:43.417385 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-jk9mp" event={"ID":"3cbb2643-d578-4b40-98d8-c06e2cee809a","Type":"ContainerDied","Data":"cb999b3aa166fd61b2b3d4ccb7e553a5cab4820df23545acb17833547198537d"} Nov 22 11:14:43 crc kubenswrapper[4926]: I1122 11:14:43.417648 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-jk9mp" event={"ID":"3cbb2643-d578-4b40-98d8-c06e2cee809a","Type":"ContainerStarted","Data":"312c281258619b629e730ec26e86319de7a9e6ea8e1607c6dc77848605a307b9"} Nov 22 11:14:44 crc kubenswrapper[4926]: I1122 11:14:44.430049 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-jk9mp" event={"ID":"3cbb2643-d578-4b40-98d8-c06e2cee809a","Type":"ContainerStarted","Data":"71feb141616aae516dc7dfc88be047c82f29f6a201b0780d9d4f3c00fb66962c"} Nov 22 11:14:45 crc kubenswrapper[4926]: I1122 11:14:45.439604 4926 generic.go:334] "Generic (PLEG): container finished" podID="3cbb2643-d578-4b40-98d8-c06e2cee809a" containerID="71feb141616aae516dc7dfc88be047c82f29f6a201b0780d9d4f3c00fb66962c" exitCode=0 Nov 22 11:14:45 crc kubenswrapper[4926]: I1122 11:14:45.439659 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-jk9mp" event={"ID":"3cbb2643-d578-4b40-98d8-c06e2cee809a","Type":"ContainerDied","Data":"71feb141616aae516dc7dfc88be047c82f29f6a201b0780d9d4f3c00fb66962c"} Nov 22 11:14:46 crc kubenswrapper[4926]: I1122 11:14:46.451782 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-jk9mp" event={"ID":"3cbb2643-d578-4b40-98d8-c06e2cee809a","Type":"ContainerStarted","Data":"08d65562725ce9efd62af34464749d4e27ad4aae54ba824ba162d3169687a6ac"} Nov 22 11:14:46 crc 
kubenswrapper[4926]: I1122 11:14:46.469450 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-jk9mp" podStartSLOduration=2.987304952 podStartE2EDuration="5.46942498s" podCreationTimestamp="2025-11-22 11:14:41 +0000 UTC" firstStartedPulling="2025-11-22 11:14:43.419680183 +0000 UTC m=+2103.721285480" lastFinishedPulling="2025-11-22 11:14:45.901800221 +0000 UTC m=+2106.203405508" observedRunningTime="2025-11-22 11:14:46.469309256 +0000 UTC m=+2106.770914573" watchObservedRunningTime="2025-11-22 11:14:46.46942498 +0000 UTC m=+2106.771030277" Nov 22 11:14:51 crc kubenswrapper[4926]: I1122 11:14:51.893946 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-jk9mp" Nov 22 11:14:51 crc kubenswrapper[4926]: I1122 11:14:51.894652 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-jk9mp" Nov 22 11:14:51 crc kubenswrapper[4926]: I1122 11:14:51.957866 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-jk9mp" Nov 22 11:14:52 crc kubenswrapper[4926]: I1122 11:14:52.576331 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-jk9mp" Nov 22 11:14:52 crc kubenswrapper[4926]: I1122 11:14:52.625981 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-jk9mp"] Nov 22 11:14:54 crc kubenswrapper[4926]: I1122 11:14:54.523981 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-jk9mp" podUID="3cbb2643-d578-4b40-98d8-c06e2cee809a" containerName="registry-server" containerID="cri-o://08d65562725ce9efd62af34464749d4e27ad4aae54ba824ba162d3169687a6ac" gracePeriod=2 Nov 22 11:14:54 crc kubenswrapper[4926]: I1122 11:14:54.610954 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-45twv"] Nov 22 11:14:54 crc kubenswrapper[4926]: I1122 11:14:54.613542 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-45twv" Nov 22 11:14:54 crc kubenswrapper[4926]: I1122 11:14:54.636749 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-45twv"] Nov 22 11:14:54 crc kubenswrapper[4926]: I1122 11:14:54.754021 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a514a3f0-32ce-4fcf-8d3b-78dd926ce852-utilities\") pod \"redhat-operators-45twv\" (UID: \"a514a3f0-32ce-4fcf-8d3b-78dd926ce852\") " pod="openshift-marketplace/redhat-operators-45twv" Nov 22 11:14:54 crc kubenswrapper[4926]: I1122 11:14:54.754115 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a514a3f0-32ce-4fcf-8d3b-78dd926ce852-catalog-content\") pod \"redhat-operators-45twv\" (UID: \"a514a3f0-32ce-4fcf-8d3b-78dd926ce852\") " pod="openshift-marketplace/redhat-operators-45twv" Nov 22 11:14:54 crc kubenswrapper[4926]: I1122 11:14:54.754199 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gqd4p\" (UniqueName: \"kubernetes.io/projected/a514a3f0-32ce-4fcf-8d3b-78dd926ce852-kube-api-access-gqd4p\") pod \"redhat-operators-45twv\" (UID: \"a514a3f0-32ce-4fcf-8d3b-78dd926ce852\") " pod="openshift-marketplace/redhat-operators-45twv" Nov 22 11:14:54 crc kubenswrapper[4926]: I1122 11:14:54.856836 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a514a3f0-32ce-4fcf-8d3b-78dd926ce852-utilities\") pod \"redhat-operators-45twv\" (UID: \"a514a3f0-32ce-4fcf-8d3b-78dd926ce852\") " pod="openshift-marketplace/redhat-operators-45twv" Nov 22 11:14:54 crc kubenswrapper[4926]: I1122 11:14:54.857092 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a514a3f0-32ce-4fcf-8d3b-78dd926ce852-catalog-content\") pod \"redhat-operators-45twv\" (UID: \"a514a3f0-32ce-4fcf-8d3b-78dd926ce852\") " pod="openshift-marketplace/redhat-operators-45twv" Nov 22 11:14:54 crc kubenswrapper[4926]: I1122 11:14:54.857122 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gqd4p\" (UniqueName: \"kubernetes.io/projected/a514a3f0-32ce-4fcf-8d3b-78dd926ce852-kube-api-access-gqd4p\") pod \"redhat-operators-45twv\" (UID: \"a514a3f0-32ce-4fcf-8d3b-78dd926ce852\") " pod="openshift-marketplace/redhat-operators-45twv" Nov 22 11:14:54 crc kubenswrapper[4926]: I1122 11:14:54.857380 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a514a3f0-32ce-4fcf-8d3b-78dd926ce852-utilities\") pod \"redhat-operators-45twv\" (UID: \"a514a3f0-32ce-4fcf-8d3b-78dd926ce852\") " pod="openshift-marketplace/redhat-operators-45twv" Nov 22 11:14:54 crc kubenswrapper[4926]: I1122 11:14:54.859279 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a514a3f0-32ce-4fcf-8d3b-78dd926ce852-catalog-content\") pod \"redhat-operators-45twv\" (UID: \"a514a3f0-32ce-4fcf-8d3b-78dd926ce852\") " pod="openshift-marketplace/redhat-operators-45twv" Nov 22 11:14:54 crc kubenswrapper[4926]: I1122 11:14:54.877213 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-gqd4p\" (UniqueName: \"kubernetes.io/projected/a514a3f0-32ce-4fcf-8d3b-78dd926ce852-kube-api-access-gqd4p\") pod \"redhat-operators-45twv\" (UID: \"a514a3f0-32ce-4fcf-8d3b-78dd926ce852\") " pod="openshift-marketplace/redhat-operators-45twv" Nov 22 11:14:55 crc kubenswrapper[4926]: I1122 11:14:55.018037 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-45twv" Nov 22 11:14:55 crc kubenswrapper[4926]: I1122 11:14:55.048650 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-jk9mp" Nov 22 11:14:55 crc kubenswrapper[4926]: I1122 11:14:55.167377 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3cbb2643-d578-4b40-98d8-c06e2cee809a-utilities\") pod \"3cbb2643-d578-4b40-98d8-c06e2cee809a\" (UID: \"3cbb2643-d578-4b40-98d8-c06e2cee809a\") " Nov 22 11:14:55 crc kubenswrapper[4926]: I1122 11:14:55.167604 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3cbb2643-d578-4b40-98d8-c06e2cee809a-catalog-content\") pod \"3cbb2643-d578-4b40-98d8-c06e2cee809a\" (UID: \"3cbb2643-d578-4b40-98d8-c06e2cee809a\") " Nov 22 11:14:55 crc kubenswrapper[4926]: I1122 11:14:55.167636 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-m9zxb\" (UniqueName: \"kubernetes.io/projected/3cbb2643-d578-4b40-98d8-c06e2cee809a-kube-api-access-m9zxb\") pod \"3cbb2643-d578-4b40-98d8-c06e2cee809a\" (UID: \"3cbb2643-d578-4b40-98d8-c06e2cee809a\") " Nov 22 11:14:55 crc kubenswrapper[4926]: I1122 11:14:55.170859 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3cbb2643-d578-4b40-98d8-c06e2cee809a-utilities" (OuterVolumeSpecName: "utilities") pod "3cbb2643-d578-4b40-98d8-c06e2cee809a" (UID: "3cbb2643-d578-4b40-98d8-c06e2cee809a"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 11:14:55 crc kubenswrapper[4926]: I1122 11:14:55.178149 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3cbb2643-d578-4b40-98d8-c06e2cee809a-kube-api-access-m9zxb" (OuterVolumeSpecName: "kube-api-access-m9zxb") pod "3cbb2643-d578-4b40-98d8-c06e2cee809a" (UID: "3cbb2643-d578-4b40-98d8-c06e2cee809a"). InnerVolumeSpecName "kube-api-access-m9zxb". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 11:14:55 crc kubenswrapper[4926]: I1122 11:14:55.269835 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-m9zxb\" (UniqueName: \"kubernetes.io/projected/3cbb2643-d578-4b40-98d8-c06e2cee809a-kube-api-access-m9zxb\") on node \"crc\" DevicePath \"\"" Nov 22 11:14:55 crc kubenswrapper[4926]: I1122 11:14:55.269872 4926 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3cbb2643-d578-4b40-98d8-c06e2cee809a-utilities\") on node \"crc\" DevicePath \"\"" Nov 22 11:14:55 crc kubenswrapper[4926]: I1122 11:14:55.296337 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3cbb2643-d578-4b40-98d8-c06e2cee809a-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "3cbb2643-d578-4b40-98d8-c06e2cee809a" (UID: "3cbb2643-d578-4b40-98d8-c06e2cee809a"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 11:14:55 crc kubenswrapper[4926]: I1122 11:14:55.371738 4926 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3cbb2643-d578-4b40-98d8-c06e2cee809a-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 22 11:14:55 crc kubenswrapper[4926]: I1122 11:14:55.534823 4926 generic.go:334] "Generic (PLEG): container finished" podID="3cbb2643-d578-4b40-98d8-c06e2cee809a" containerID="08d65562725ce9efd62af34464749d4e27ad4aae54ba824ba162d3169687a6ac" exitCode=0 Nov 22 11:14:55 crc kubenswrapper[4926]: I1122 11:14:55.534922 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-jk9mp" Nov 22 11:14:55 crc kubenswrapper[4926]: I1122 11:14:55.534935 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-jk9mp" event={"ID":"3cbb2643-d578-4b40-98d8-c06e2cee809a","Type":"ContainerDied","Data":"08d65562725ce9efd62af34464749d4e27ad4aae54ba824ba162d3169687a6ac"} Nov 22 11:14:55 crc kubenswrapper[4926]: I1122 11:14:55.535174 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-jk9mp" event={"ID":"3cbb2643-d578-4b40-98d8-c06e2cee809a","Type":"ContainerDied","Data":"312c281258619b629e730ec26e86319de7a9e6ea8e1607c6dc77848605a307b9"} Nov 22 11:14:55 crc kubenswrapper[4926]: I1122 11:14:55.535233 4926 scope.go:117] "RemoveContainer" containerID="08d65562725ce9efd62af34464749d4e27ad4aae54ba824ba162d3169687a6ac" Nov 22 11:14:55 crc kubenswrapper[4926]: I1122 11:14:55.570343 4926 scope.go:117] "RemoveContainer" containerID="71feb141616aae516dc7dfc88be047c82f29f6a201b0780d9d4f3c00fb66962c" Nov 22 11:14:55 crc kubenswrapper[4926]: I1122 11:14:55.585490 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-jk9mp"] Nov 22 11:14:55 crc kubenswrapper[4926]: I1122 11:14:55.592485 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-jk9mp"] Nov 22 11:14:55 crc kubenswrapper[4926]: I1122 11:14:55.598082 4926 scope.go:117] "RemoveContainer" containerID="cb999b3aa166fd61b2b3d4ccb7e553a5cab4820df23545acb17833547198537d" Nov 22 11:14:55 crc kubenswrapper[4926]: I1122 11:14:55.626069 4926 scope.go:117] "RemoveContainer" containerID="08d65562725ce9efd62af34464749d4e27ad4aae54ba824ba162d3169687a6ac" Nov 22 11:14:55 crc kubenswrapper[4926]: E1122 11:14:55.626659 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"08d65562725ce9efd62af34464749d4e27ad4aae54ba824ba162d3169687a6ac\": container with ID starting with 08d65562725ce9efd62af34464749d4e27ad4aae54ba824ba162d3169687a6ac not found: ID does not exist" containerID="08d65562725ce9efd62af34464749d4e27ad4aae54ba824ba162d3169687a6ac" Nov 22 11:14:55 crc kubenswrapper[4926]: I1122 11:14:55.626693 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"08d65562725ce9efd62af34464749d4e27ad4aae54ba824ba162d3169687a6ac"} err="failed to get container status \"08d65562725ce9efd62af34464749d4e27ad4aae54ba824ba162d3169687a6ac\": rpc error: code = NotFound desc = could not find container \"08d65562725ce9efd62af34464749d4e27ad4aae54ba824ba162d3169687a6ac\": container with ID starting with 08d65562725ce9efd62af34464749d4e27ad4aae54ba824ba162d3169687a6ac not found: ID does not exist" Nov 22 
11:14:55 crc kubenswrapper[4926]: I1122 11:14:55.626732 4926 scope.go:117] "RemoveContainer" containerID="71feb141616aae516dc7dfc88be047c82f29f6a201b0780d9d4f3c00fb66962c" Nov 22 11:14:55 crc kubenswrapper[4926]: E1122 11:14:55.627167 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"71feb141616aae516dc7dfc88be047c82f29f6a201b0780d9d4f3c00fb66962c\": container with ID starting with 71feb141616aae516dc7dfc88be047c82f29f6a201b0780d9d4f3c00fb66962c not found: ID does not exist" containerID="71feb141616aae516dc7dfc88be047c82f29f6a201b0780d9d4f3c00fb66962c" Nov 22 11:14:55 crc kubenswrapper[4926]: I1122 11:14:55.627196 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"71feb141616aae516dc7dfc88be047c82f29f6a201b0780d9d4f3c00fb66962c"} err="failed to get container status \"71feb141616aae516dc7dfc88be047c82f29f6a201b0780d9d4f3c00fb66962c\": rpc error: code = NotFound desc = could not find container \"71feb141616aae516dc7dfc88be047c82f29f6a201b0780d9d4f3c00fb66962c\": container with ID starting with 71feb141616aae516dc7dfc88be047c82f29f6a201b0780d9d4f3c00fb66962c not found: ID does not exist" Nov 22 11:14:55 crc kubenswrapper[4926]: I1122 11:14:55.627214 4926 scope.go:117] "RemoveContainer" containerID="cb999b3aa166fd61b2b3d4ccb7e553a5cab4820df23545acb17833547198537d" Nov 22 11:14:55 crc kubenswrapper[4926]: E1122 11:14:55.627489 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"cb999b3aa166fd61b2b3d4ccb7e553a5cab4820df23545acb17833547198537d\": container with ID starting with cb999b3aa166fd61b2b3d4ccb7e553a5cab4820df23545acb17833547198537d not found: ID does not exist" containerID="cb999b3aa166fd61b2b3d4ccb7e553a5cab4820df23545acb17833547198537d" Nov 22 11:14:55 crc kubenswrapper[4926]: I1122 11:14:55.627513 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cb999b3aa166fd61b2b3d4ccb7e553a5cab4820df23545acb17833547198537d"} err="failed to get container status \"cb999b3aa166fd61b2b3d4ccb7e553a5cab4820df23545acb17833547198537d\": rpc error: code = NotFound desc = could not find container \"cb999b3aa166fd61b2b3d4ccb7e553a5cab4820df23545acb17833547198537d\": container with ID starting with cb999b3aa166fd61b2b3d4ccb7e553a5cab4820df23545acb17833547198537d not found: ID does not exist" Nov 22 11:14:55 crc kubenswrapper[4926]: I1122 11:14:55.630022 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-45twv"] Nov 22 11:14:55 crc kubenswrapper[4926]: W1122 11:14:55.638242 4926 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poda514a3f0_32ce_4fcf_8d3b_78dd926ce852.slice/crio-967627cb017841e79caba26508573dc489454d17c81b77ff5dc2926e6e5932e0 WatchSource:0}: Error finding container 967627cb017841e79caba26508573dc489454d17c81b77ff5dc2926e6e5932e0: Status 404 returned error can't find the container with id 967627cb017841e79caba26508573dc489454d17c81b77ff5dc2926e6e5932e0 Nov 22 11:14:56 crc kubenswrapper[4926]: I1122 11:14:56.550441 4926 generic.go:334] "Generic (PLEG): container finished" podID="a514a3f0-32ce-4fcf-8d3b-78dd926ce852" containerID="d9f6ee8df7bd1c85e4e017bcbb712e8705ab6e6605afdc86c76aa4b6acc8ed59" exitCode=0 Nov 22 11:14:56 crc kubenswrapper[4926]: I1122 11:14:56.550506 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-marketplace/redhat-operators-45twv" event={"ID":"a514a3f0-32ce-4fcf-8d3b-78dd926ce852","Type":"ContainerDied","Data":"d9f6ee8df7bd1c85e4e017bcbb712e8705ab6e6605afdc86c76aa4b6acc8ed59"} Nov 22 11:14:56 crc kubenswrapper[4926]: I1122 11:14:56.551011 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-45twv" event={"ID":"a514a3f0-32ce-4fcf-8d3b-78dd926ce852","Type":"ContainerStarted","Data":"967627cb017841e79caba26508573dc489454d17c81b77ff5dc2926e6e5932e0"} Nov 22 11:14:56 crc kubenswrapper[4926]: I1122 11:14:56.594109 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3cbb2643-d578-4b40-98d8-c06e2cee809a" path="/var/lib/kubelet/pods/3cbb2643-d578-4b40-98d8-c06e2cee809a/volumes" Nov 22 11:14:57 crc kubenswrapper[4926]: I1122 11:14:57.566029 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-45twv" event={"ID":"a514a3f0-32ce-4fcf-8d3b-78dd926ce852","Type":"ContainerStarted","Data":"ee2c269405e53a69dd90a54503aa28b653935a1046f79b915d466433c68a104a"} Nov 22 11:14:58 crc kubenswrapper[4926]: I1122 11:14:58.584235 4926 generic.go:334] "Generic (PLEG): container finished" podID="a514a3f0-32ce-4fcf-8d3b-78dd926ce852" containerID="ee2c269405e53a69dd90a54503aa28b653935a1046f79b915d466433c68a104a" exitCode=0 Nov 22 11:14:58 crc kubenswrapper[4926]: I1122 11:14:58.595915 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-45twv" event={"ID":"a514a3f0-32ce-4fcf-8d3b-78dd926ce852","Type":"ContainerDied","Data":"ee2c269405e53a69dd90a54503aa28b653935a1046f79b915d466433c68a104a"} Nov 22 11:14:59 crc kubenswrapper[4926]: I1122 11:14:59.604369 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-45twv" event={"ID":"a514a3f0-32ce-4fcf-8d3b-78dd926ce852","Type":"ContainerStarted","Data":"22c4269295aa493c93defdd87fa15f76c44a05bdc977ed809df37a3191802889"} Nov 22 11:14:59 crc kubenswrapper[4926]: I1122 11:14:59.641065 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-45twv" podStartSLOduration=3.202572764 podStartE2EDuration="5.641038831s" podCreationTimestamp="2025-11-22 11:14:54 +0000 UTC" firstStartedPulling="2025-11-22 11:14:56.554632735 +0000 UTC m=+2116.856238032" lastFinishedPulling="2025-11-22 11:14:58.993098772 +0000 UTC m=+2119.294704099" observedRunningTime="2025-11-22 11:14:59.629778018 +0000 UTC m=+2119.931383315" watchObservedRunningTime="2025-11-22 11:14:59.641038831 +0000 UTC m=+2119.942644128" Nov 22 11:15:00 crc kubenswrapper[4926]: I1122 11:15:00.170860 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29396835-cfx7j"] Nov 22 11:15:00 crc kubenswrapper[4926]: E1122 11:15:00.171809 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3cbb2643-d578-4b40-98d8-c06e2cee809a" containerName="extract-content" Nov 22 11:15:00 crc kubenswrapper[4926]: I1122 11:15:00.171832 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="3cbb2643-d578-4b40-98d8-c06e2cee809a" containerName="extract-content" Nov 22 11:15:00 crc kubenswrapper[4926]: E1122 11:15:00.171856 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3cbb2643-d578-4b40-98d8-c06e2cee809a" containerName="extract-utilities" Nov 22 11:15:00 crc kubenswrapper[4926]: I1122 11:15:00.171863 4926 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="3cbb2643-d578-4b40-98d8-c06e2cee809a" containerName="extract-utilities" Nov 22 11:15:00 crc kubenswrapper[4926]: E1122 11:15:00.171876 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3cbb2643-d578-4b40-98d8-c06e2cee809a" containerName="registry-server" Nov 22 11:15:00 crc kubenswrapper[4926]: I1122 11:15:00.171900 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="3cbb2643-d578-4b40-98d8-c06e2cee809a" containerName="registry-server" Nov 22 11:15:00 crc kubenswrapper[4926]: I1122 11:15:00.172235 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="3cbb2643-d578-4b40-98d8-c06e2cee809a" containerName="registry-server" Nov 22 11:15:00 crc kubenswrapper[4926]: I1122 11:15:00.173117 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29396835-cfx7j" Nov 22 11:15:00 crc kubenswrapper[4926]: I1122 11:15:00.176497 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 22 11:15:00 crc kubenswrapper[4926]: I1122 11:15:00.176566 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 22 11:15:00 crc kubenswrapper[4926]: I1122 11:15:00.194786 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29396835-cfx7j"] Nov 22 11:15:00 crc kubenswrapper[4926]: I1122 11:15:00.373568 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4qq59\" (UniqueName: \"kubernetes.io/projected/8233e599-0950-4ba2-82a9-529f156028d2-kube-api-access-4qq59\") pod \"collect-profiles-29396835-cfx7j\" (UID: \"8233e599-0950-4ba2-82a9-529f156028d2\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29396835-cfx7j" Nov 22 11:15:00 crc kubenswrapper[4926]: I1122 11:15:00.374076 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/8233e599-0950-4ba2-82a9-529f156028d2-secret-volume\") pod \"collect-profiles-29396835-cfx7j\" (UID: \"8233e599-0950-4ba2-82a9-529f156028d2\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29396835-cfx7j" Nov 22 11:15:00 crc kubenswrapper[4926]: I1122 11:15:00.374291 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/8233e599-0950-4ba2-82a9-529f156028d2-config-volume\") pod \"collect-profiles-29396835-cfx7j\" (UID: \"8233e599-0950-4ba2-82a9-529f156028d2\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29396835-cfx7j" Nov 22 11:15:00 crc kubenswrapper[4926]: I1122 11:15:00.475676 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/8233e599-0950-4ba2-82a9-529f156028d2-config-volume\") pod \"collect-profiles-29396835-cfx7j\" (UID: \"8233e599-0950-4ba2-82a9-529f156028d2\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29396835-cfx7j" Nov 22 11:15:00 crc kubenswrapper[4926]: I1122 11:15:00.475788 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4qq59\" (UniqueName: \"kubernetes.io/projected/8233e599-0950-4ba2-82a9-529f156028d2-kube-api-access-4qq59\") pod \"collect-profiles-29396835-cfx7j\" 
(UID: \"8233e599-0950-4ba2-82a9-529f156028d2\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29396835-cfx7j" Nov 22 11:15:00 crc kubenswrapper[4926]: I1122 11:15:00.475834 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/8233e599-0950-4ba2-82a9-529f156028d2-secret-volume\") pod \"collect-profiles-29396835-cfx7j\" (UID: \"8233e599-0950-4ba2-82a9-529f156028d2\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29396835-cfx7j" Nov 22 11:15:00 crc kubenswrapper[4926]: I1122 11:15:00.477864 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/8233e599-0950-4ba2-82a9-529f156028d2-config-volume\") pod \"collect-profiles-29396835-cfx7j\" (UID: \"8233e599-0950-4ba2-82a9-529f156028d2\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29396835-cfx7j" Nov 22 11:15:00 crc kubenswrapper[4926]: I1122 11:15:00.481568 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/8233e599-0950-4ba2-82a9-529f156028d2-secret-volume\") pod \"collect-profiles-29396835-cfx7j\" (UID: \"8233e599-0950-4ba2-82a9-529f156028d2\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29396835-cfx7j" Nov 22 11:15:00 crc kubenswrapper[4926]: I1122 11:15:00.494265 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4qq59\" (UniqueName: \"kubernetes.io/projected/8233e599-0950-4ba2-82a9-529f156028d2-kube-api-access-4qq59\") pod \"collect-profiles-29396835-cfx7j\" (UID: \"8233e599-0950-4ba2-82a9-529f156028d2\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29396835-cfx7j" Nov 22 11:15:00 crc kubenswrapper[4926]: I1122 11:15:00.495497 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29396835-cfx7j" Nov 22 11:15:00 crc kubenswrapper[4926]: I1122 11:15:00.939171 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29396835-cfx7j"] Nov 22 11:15:01 crc kubenswrapper[4926]: I1122 11:15:01.627745 4926 generic.go:334] "Generic (PLEG): container finished" podID="8233e599-0950-4ba2-82a9-529f156028d2" containerID="ebae1858fba454324c7a15641b0d083df02ba3259c0dc1e43f1e92c96a27f38c" exitCode=0 Nov 22 11:15:01 crc kubenswrapper[4926]: I1122 11:15:01.628037 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29396835-cfx7j" event={"ID":"8233e599-0950-4ba2-82a9-529f156028d2","Type":"ContainerDied","Data":"ebae1858fba454324c7a15641b0d083df02ba3259c0dc1e43f1e92c96a27f38c"} Nov 22 11:15:01 crc kubenswrapper[4926]: I1122 11:15:01.628062 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29396835-cfx7j" event={"ID":"8233e599-0950-4ba2-82a9-529f156028d2","Type":"ContainerStarted","Data":"27cecae6565f97a7b733c2fcd6d3a235ca52591b33b62037965a4df1435b8743"} Nov 22 11:15:03 crc kubenswrapper[4926]: I1122 11:15:03.056925 4926 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29396835-cfx7j" Nov 22 11:15:03 crc kubenswrapper[4926]: I1122 11:15:03.126280 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/8233e599-0950-4ba2-82a9-529f156028d2-secret-volume\") pod \"8233e599-0950-4ba2-82a9-529f156028d2\" (UID: \"8233e599-0950-4ba2-82a9-529f156028d2\") " Nov 22 11:15:03 crc kubenswrapper[4926]: I1122 11:15:03.126522 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4qq59\" (UniqueName: \"kubernetes.io/projected/8233e599-0950-4ba2-82a9-529f156028d2-kube-api-access-4qq59\") pod \"8233e599-0950-4ba2-82a9-529f156028d2\" (UID: \"8233e599-0950-4ba2-82a9-529f156028d2\") " Nov 22 11:15:03 crc kubenswrapper[4926]: I1122 11:15:03.126634 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/8233e599-0950-4ba2-82a9-529f156028d2-config-volume\") pod \"8233e599-0950-4ba2-82a9-529f156028d2\" (UID: \"8233e599-0950-4ba2-82a9-529f156028d2\") " Nov 22 11:15:03 crc kubenswrapper[4926]: I1122 11:15:03.127514 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8233e599-0950-4ba2-82a9-529f156028d2-config-volume" (OuterVolumeSpecName: "config-volume") pod "8233e599-0950-4ba2-82a9-529f156028d2" (UID: "8233e599-0950-4ba2-82a9-529f156028d2"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 11:15:03 crc kubenswrapper[4926]: I1122 11:15:03.131964 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8233e599-0950-4ba2-82a9-529f156028d2-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "8233e599-0950-4ba2-82a9-529f156028d2" (UID: "8233e599-0950-4ba2-82a9-529f156028d2"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 11:15:03 crc kubenswrapper[4926]: I1122 11:15:03.131994 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8233e599-0950-4ba2-82a9-529f156028d2-kube-api-access-4qq59" (OuterVolumeSpecName: "kube-api-access-4qq59") pod "8233e599-0950-4ba2-82a9-529f156028d2" (UID: "8233e599-0950-4ba2-82a9-529f156028d2"). InnerVolumeSpecName "kube-api-access-4qq59". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 11:15:03 crc kubenswrapper[4926]: I1122 11:15:03.227625 4926 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/8233e599-0950-4ba2-82a9-529f156028d2-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 22 11:15:03 crc kubenswrapper[4926]: I1122 11:15:03.227650 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4qq59\" (UniqueName: \"kubernetes.io/projected/8233e599-0950-4ba2-82a9-529f156028d2-kube-api-access-4qq59\") on node \"crc\" DevicePath \"\"" Nov 22 11:15:03 crc kubenswrapper[4926]: I1122 11:15:03.227659 4926 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/8233e599-0950-4ba2-82a9-529f156028d2-config-volume\") on node \"crc\" DevicePath \"\"" Nov 22 11:15:03 crc kubenswrapper[4926]: I1122 11:15:03.650250 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29396835-cfx7j" event={"ID":"8233e599-0950-4ba2-82a9-529f156028d2","Type":"ContainerDied","Data":"27cecae6565f97a7b733c2fcd6d3a235ca52591b33b62037965a4df1435b8743"} Nov 22 11:15:03 crc kubenswrapper[4926]: I1122 11:15:03.650307 4926 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="27cecae6565f97a7b733c2fcd6d3a235ca52591b33b62037965a4df1435b8743" Nov 22 11:15:03 crc kubenswrapper[4926]: I1122 11:15:03.650329 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29396835-cfx7j" Nov 22 11:15:04 crc kubenswrapper[4926]: I1122 11:15:04.128015 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29396790-vw7kn"] Nov 22 11:15:04 crc kubenswrapper[4926]: I1122 11:15:04.134904 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29396790-vw7kn"] Nov 22 11:15:04 crc kubenswrapper[4926]: I1122 11:15:04.594055 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f6332434-11ab-46ab-8379-c056d2c292b5" path="/var/lib/kubelet/pods/f6332434-11ab-46ab-8379-c056d2c292b5/volumes" Nov 22 11:15:05 crc kubenswrapper[4926]: I1122 11:15:05.018187 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-45twv" Nov 22 11:15:05 crc kubenswrapper[4926]: I1122 11:15:05.018237 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-45twv" Nov 22 11:15:06 crc kubenswrapper[4926]: I1122 11:15:06.072340 4926 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-45twv" podUID="a514a3f0-32ce-4fcf-8d3b-78dd926ce852" containerName="registry-server" probeResult="failure" output=< Nov 22 11:15:06 crc kubenswrapper[4926]: timeout: failed to connect service ":50051" within 1s Nov 22 11:15:06 crc kubenswrapper[4926]: > Nov 22 11:15:09 crc kubenswrapper[4926]: I1122 11:15:09.661976 4926 patch_prober.go:28] interesting pod/machine-config-daemon-xr9nd container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 22 11:15:09 crc kubenswrapper[4926]: I1122 11:15:09.662969 4926 prober.go:107] "Probe failed" probeType="Liveness" 
pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" podUID="d4977b14-85c3-4141-9b15-1768f09e8d27" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 22 11:15:09 crc kubenswrapper[4926]: I1122 11:15:09.663069 4926 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" Nov 22 11:15:09 crc kubenswrapper[4926]: I1122 11:15:09.664416 4926 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"d0b2e230e48b02d7f7eb51b7f5997a9106b1cc69fcc4aca4be72bfa73cbbac6b"} pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 22 11:15:09 crc kubenswrapper[4926]: I1122 11:15:09.664496 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" podUID="d4977b14-85c3-4141-9b15-1768f09e8d27" containerName="machine-config-daemon" containerID="cri-o://d0b2e230e48b02d7f7eb51b7f5997a9106b1cc69fcc4aca4be72bfa73cbbac6b" gracePeriod=600 Nov 22 11:15:10 crc kubenswrapper[4926]: I1122 11:15:10.713595 4926 generic.go:334] "Generic (PLEG): container finished" podID="d4977b14-85c3-4141-9b15-1768f09e8d27" containerID="d0b2e230e48b02d7f7eb51b7f5997a9106b1cc69fcc4aca4be72bfa73cbbac6b" exitCode=0 Nov 22 11:15:10 crc kubenswrapper[4926]: I1122 11:15:10.713651 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" event={"ID":"d4977b14-85c3-4141-9b15-1768f09e8d27","Type":"ContainerDied","Data":"d0b2e230e48b02d7f7eb51b7f5997a9106b1cc69fcc4aca4be72bfa73cbbac6b"} Nov 22 11:15:10 crc kubenswrapper[4926]: I1122 11:15:10.714198 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" event={"ID":"d4977b14-85c3-4141-9b15-1768f09e8d27","Type":"ContainerStarted","Data":"2e3f235c8fb6d7bb8e4f924fdc1e4071eb0ee0c8a090058bfe231e5c577e4544"} Nov 22 11:15:10 crc kubenswrapper[4926]: I1122 11:15:10.714235 4926 scope.go:117] "RemoveContainer" containerID="e38f3f8166b5990b08ef0202b8570c03dbfd957e5057988fdcd93fcb0d15ee5f" Nov 22 11:15:15 crc kubenswrapper[4926]: I1122 11:15:15.103370 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-45twv" Nov 22 11:15:15 crc kubenswrapper[4926]: I1122 11:15:15.181930 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-45twv" Nov 22 11:15:15 crc kubenswrapper[4926]: I1122 11:15:15.359677 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-45twv"] Nov 22 11:15:16 crc kubenswrapper[4926]: I1122 11:15:16.800794 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-45twv" podUID="a514a3f0-32ce-4fcf-8d3b-78dd926ce852" containerName="registry-server" containerID="cri-o://22c4269295aa493c93defdd87fa15f76c44a05bdc977ed809df37a3191802889" gracePeriod=2 Nov 22 11:15:17 crc kubenswrapper[4926]: I1122 11:15:17.334938 4926 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-45twv" Nov 22 11:15:17 crc kubenswrapper[4926]: I1122 11:15:17.491329 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a514a3f0-32ce-4fcf-8d3b-78dd926ce852-utilities\") pod \"a514a3f0-32ce-4fcf-8d3b-78dd926ce852\" (UID: \"a514a3f0-32ce-4fcf-8d3b-78dd926ce852\") " Nov 22 11:15:17 crc kubenswrapper[4926]: I1122 11:15:17.491489 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gqd4p\" (UniqueName: \"kubernetes.io/projected/a514a3f0-32ce-4fcf-8d3b-78dd926ce852-kube-api-access-gqd4p\") pod \"a514a3f0-32ce-4fcf-8d3b-78dd926ce852\" (UID: \"a514a3f0-32ce-4fcf-8d3b-78dd926ce852\") " Nov 22 11:15:17 crc kubenswrapper[4926]: I1122 11:15:17.491526 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a514a3f0-32ce-4fcf-8d3b-78dd926ce852-catalog-content\") pod \"a514a3f0-32ce-4fcf-8d3b-78dd926ce852\" (UID: \"a514a3f0-32ce-4fcf-8d3b-78dd926ce852\") " Nov 22 11:15:17 crc kubenswrapper[4926]: I1122 11:15:17.492366 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a514a3f0-32ce-4fcf-8d3b-78dd926ce852-utilities" (OuterVolumeSpecName: "utilities") pod "a514a3f0-32ce-4fcf-8d3b-78dd926ce852" (UID: "a514a3f0-32ce-4fcf-8d3b-78dd926ce852"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 11:15:17 crc kubenswrapper[4926]: I1122 11:15:17.496658 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a514a3f0-32ce-4fcf-8d3b-78dd926ce852-kube-api-access-gqd4p" (OuterVolumeSpecName: "kube-api-access-gqd4p") pod "a514a3f0-32ce-4fcf-8d3b-78dd926ce852" (UID: "a514a3f0-32ce-4fcf-8d3b-78dd926ce852"). InnerVolumeSpecName "kube-api-access-gqd4p". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 11:15:17 crc kubenswrapper[4926]: I1122 11:15:17.593717 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gqd4p\" (UniqueName: \"kubernetes.io/projected/a514a3f0-32ce-4fcf-8d3b-78dd926ce852-kube-api-access-gqd4p\") on node \"crc\" DevicePath \"\"" Nov 22 11:15:17 crc kubenswrapper[4926]: I1122 11:15:17.593755 4926 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a514a3f0-32ce-4fcf-8d3b-78dd926ce852-utilities\") on node \"crc\" DevicePath \"\"" Nov 22 11:15:17 crc kubenswrapper[4926]: I1122 11:15:17.599843 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a514a3f0-32ce-4fcf-8d3b-78dd926ce852-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "a514a3f0-32ce-4fcf-8d3b-78dd926ce852" (UID: "a514a3f0-32ce-4fcf-8d3b-78dd926ce852"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 11:15:17 crc kubenswrapper[4926]: I1122 11:15:17.695062 4926 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a514a3f0-32ce-4fcf-8d3b-78dd926ce852-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 22 11:15:17 crc kubenswrapper[4926]: I1122 11:15:17.813062 4926 generic.go:334] "Generic (PLEG): container finished" podID="a514a3f0-32ce-4fcf-8d3b-78dd926ce852" containerID="22c4269295aa493c93defdd87fa15f76c44a05bdc977ed809df37a3191802889" exitCode=0 Nov 22 11:15:17 crc kubenswrapper[4926]: I1122 11:15:17.813104 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-45twv" event={"ID":"a514a3f0-32ce-4fcf-8d3b-78dd926ce852","Type":"ContainerDied","Data":"22c4269295aa493c93defdd87fa15f76c44a05bdc977ed809df37a3191802889"} Nov 22 11:15:17 crc kubenswrapper[4926]: I1122 11:15:17.813131 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-45twv" event={"ID":"a514a3f0-32ce-4fcf-8d3b-78dd926ce852","Type":"ContainerDied","Data":"967627cb017841e79caba26508573dc489454d17c81b77ff5dc2926e6e5932e0"} Nov 22 11:15:17 crc kubenswrapper[4926]: I1122 11:15:17.813148 4926 scope.go:117] "RemoveContainer" containerID="22c4269295aa493c93defdd87fa15f76c44a05bdc977ed809df37a3191802889" Nov 22 11:15:17 crc kubenswrapper[4926]: I1122 11:15:17.813204 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-45twv" Nov 22 11:15:17 crc kubenswrapper[4926]: I1122 11:15:17.861749 4926 scope.go:117] "RemoveContainer" containerID="ee2c269405e53a69dd90a54503aa28b653935a1046f79b915d466433c68a104a" Nov 22 11:15:17 crc kubenswrapper[4926]: I1122 11:15:17.865204 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-45twv"] Nov 22 11:15:17 crc kubenswrapper[4926]: I1122 11:15:17.874519 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-45twv"] Nov 22 11:15:17 crc kubenswrapper[4926]: I1122 11:15:17.883976 4926 scope.go:117] "RemoveContainer" containerID="d9f6ee8df7bd1c85e4e017bcbb712e8705ab6e6605afdc86c76aa4b6acc8ed59" Nov 22 11:15:17 crc kubenswrapper[4926]: I1122 11:15:17.952047 4926 scope.go:117] "RemoveContainer" containerID="22c4269295aa493c93defdd87fa15f76c44a05bdc977ed809df37a3191802889" Nov 22 11:15:17 crc kubenswrapper[4926]: E1122 11:15:17.952455 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"22c4269295aa493c93defdd87fa15f76c44a05bdc977ed809df37a3191802889\": container with ID starting with 22c4269295aa493c93defdd87fa15f76c44a05bdc977ed809df37a3191802889 not found: ID does not exist" containerID="22c4269295aa493c93defdd87fa15f76c44a05bdc977ed809df37a3191802889" Nov 22 11:15:17 crc kubenswrapper[4926]: I1122 11:15:17.952507 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"22c4269295aa493c93defdd87fa15f76c44a05bdc977ed809df37a3191802889"} err="failed to get container status \"22c4269295aa493c93defdd87fa15f76c44a05bdc977ed809df37a3191802889\": rpc error: code = NotFound desc = could not find container \"22c4269295aa493c93defdd87fa15f76c44a05bdc977ed809df37a3191802889\": container with ID starting with 22c4269295aa493c93defdd87fa15f76c44a05bdc977ed809df37a3191802889 not found: ID does not exist" Nov 22 11:15:17 crc 
kubenswrapper[4926]: I1122 11:15:17.952540 4926 scope.go:117] "RemoveContainer" containerID="ee2c269405e53a69dd90a54503aa28b653935a1046f79b915d466433c68a104a" Nov 22 11:15:17 crc kubenswrapper[4926]: E1122 11:15:17.952928 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ee2c269405e53a69dd90a54503aa28b653935a1046f79b915d466433c68a104a\": container with ID starting with ee2c269405e53a69dd90a54503aa28b653935a1046f79b915d466433c68a104a not found: ID does not exist" containerID="ee2c269405e53a69dd90a54503aa28b653935a1046f79b915d466433c68a104a" Nov 22 11:15:17 crc kubenswrapper[4926]: I1122 11:15:17.952979 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ee2c269405e53a69dd90a54503aa28b653935a1046f79b915d466433c68a104a"} err="failed to get container status \"ee2c269405e53a69dd90a54503aa28b653935a1046f79b915d466433c68a104a\": rpc error: code = NotFound desc = could not find container \"ee2c269405e53a69dd90a54503aa28b653935a1046f79b915d466433c68a104a\": container with ID starting with ee2c269405e53a69dd90a54503aa28b653935a1046f79b915d466433c68a104a not found: ID does not exist" Nov 22 11:15:17 crc kubenswrapper[4926]: I1122 11:15:17.953009 4926 scope.go:117] "RemoveContainer" containerID="d9f6ee8df7bd1c85e4e017bcbb712e8705ab6e6605afdc86c76aa4b6acc8ed59" Nov 22 11:15:17 crc kubenswrapper[4926]: E1122 11:15:17.953372 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d9f6ee8df7bd1c85e4e017bcbb712e8705ab6e6605afdc86c76aa4b6acc8ed59\": container with ID starting with d9f6ee8df7bd1c85e4e017bcbb712e8705ab6e6605afdc86c76aa4b6acc8ed59 not found: ID does not exist" containerID="d9f6ee8df7bd1c85e4e017bcbb712e8705ab6e6605afdc86c76aa4b6acc8ed59" Nov 22 11:15:17 crc kubenswrapper[4926]: I1122 11:15:17.953393 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d9f6ee8df7bd1c85e4e017bcbb712e8705ab6e6605afdc86c76aa4b6acc8ed59"} err="failed to get container status \"d9f6ee8df7bd1c85e4e017bcbb712e8705ab6e6605afdc86c76aa4b6acc8ed59\": rpc error: code = NotFound desc = could not find container \"d9f6ee8df7bd1c85e4e017bcbb712e8705ab6e6605afdc86c76aa4b6acc8ed59\": container with ID starting with d9f6ee8df7bd1c85e4e017bcbb712e8705ab6e6605afdc86c76aa4b6acc8ed59 not found: ID does not exist" Nov 22 11:15:18 crc kubenswrapper[4926]: I1122 11:15:18.597539 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a514a3f0-32ce-4fcf-8d3b-78dd926ce852" path="/var/lib/kubelet/pods/a514a3f0-32ce-4fcf-8d3b-78dd926ce852/volumes" Nov 22 11:15:22 crc kubenswrapper[4926]: I1122 11:15:22.078453 4926 scope.go:117] "RemoveContainer" containerID="3bc4c73d77c40218b9b6c248cd942048e0a507829d42cc11887f49232b4f78e8" Nov 22 11:15:37 crc kubenswrapper[4926]: I1122 11:15:37.291497 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-pd8p2"] Nov 22 11:15:37 crc kubenswrapper[4926]: E1122 11:15:37.292821 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a514a3f0-32ce-4fcf-8d3b-78dd926ce852" containerName="registry-server" Nov 22 11:15:37 crc kubenswrapper[4926]: I1122 11:15:37.292844 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="a514a3f0-32ce-4fcf-8d3b-78dd926ce852" containerName="registry-server" Nov 22 11:15:37 crc kubenswrapper[4926]: E1122 11:15:37.292927 4926 cpu_manager.go:410] 
"RemoveStaleState: removing container" podUID="a514a3f0-32ce-4fcf-8d3b-78dd926ce852" containerName="extract-utilities" Nov 22 11:15:37 crc kubenswrapper[4926]: I1122 11:15:37.292941 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="a514a3f0-32ce-4fcf-8d3b-78dd926ce852" containerName="extract-utilities" Nov 22 11:15:37 crc kubenswrapper[4926]: E1122 11:15:37.292963 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a514a3f0-32ce-4fcf-8d3b-78dd926ce852" containerName="extract-content" Nov 22 11:15:37 crc kubenswrapper[4926]: I1122 11:15:37.292977 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="a514a3f0-32ce-4fcf-8d3b-78dd926ce852" containerName="extract-content" Nov 22 11:15:37 crc kubenswrapper[4926]: E1122 11:15:37.292999 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8233e599-0950-4ba2-82a9-529f156028d2" containerName="collect-profiles" Nov 22 11:15:37 crc kubenswrapper[4926]: I1122 11:15:37.293034 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="8233e599-0950-4ba2-82a9-529f156028d2" containerName="collect-profiles" Nov 22 11:15:37 crc kubenswrapper[4926]: I1122 11:15:37.293352 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="8233e599-0950-4ba2-82a9-529f156028d2" containerName="collect-profiles" Nov 22 11:15:37 crc kubenswrapper[4926]: I1122 11:15:37.293381 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="a514a3f0-32ce-4fcf-8d3b-78dd926ce852" containerName="registry-server" Nov 22 11:15:37 crc kubenswrapper[4926]: I1122 11:15:37.296015 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-pd8p2" Nov 22 11:15:37 crc kubenswrapper[4926]: I1122 11:15:37.315219 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-pd8p2"] Nov 22 11:15:37 crc kubenswrapper[4926]: I1122 11:15:37.385848 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8wdds\" (UniqueName: \"kubernetes.io/projected/12bd2170-e028-4c5c-bdc8-838e968e47fa-kube-api-access-8wdds\") pod \"community-operators-pd8p2\" (UID: \"12bd2170-e028-4c5c-bdc8-838e968e47fa\") " pod="openshift-marketplace/community-operators-pd8p2" Nov 22 11:15:37 crc kubenswrapper[4926]: I1122 11:15:37.385930 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/12bd2170-e028-4c5c-bdc8-838e968e47fa-utilities\") pod \"community-operators-pd8p2\" (UID: \"12bd2170-e028-4c5c-bdc8-838e968e47fa\") " pod="openshift-marketplace/community-operators-pd8p2" Nov 22 11:15:37 crc kubenswrapper[4926]: I1122 11:15:37.386336 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/12bd2170-e028-4c5c-bdc8-838e968e47fa-catalog-content\") pod \"community-operators-pd8p2\" (UID: \"12bd2170-e028-4c5c-bdc8-838e968e47fa\") " pod="openshift-marketplace/community-operators-pd8p2" Nov 22 11:15:37 crc kubenswrapper[4926]: I1122 11:15:37.488760 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8wdds\" (UniqueName: \"kubernetes.io/projected/12bd2170-e028-4c5c-bdc8-838e968e47fa-kube-api-access-8wdds\") pod \"community-operators-pd8p2\" (UID: \"12bd2170-e028-4c5c-bdc8-838e968e47fa\") " pod="openshift-marketplace/community-operators-pd8p2" Nov 22 11:15:37 
crc kubenswrapper[4926]: I1122 11:15:37.488861 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/12bd2170-e028-4c5c-bdc8-838e968e47fa-utilities\") pod \"community-operators-pd8p2\" (UID: \"12bd2170-e028-4c5c-bdc8-838e968e47fa\") " pod="openshift-marketplace/community-operators-pd8p2" Nov 22 11:15:37 crc kubenswrapper[4926]: I1122 11:15:37.489086 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/12bd2170-e028-4c5c-bdc8-838e968e47fa-catalog-content\") pod \"community-operators-pd8p2\" (UID: \"12bd2170-e028-4c5c-bdc8-838e968e47fa\") " pod="openshift-marketplace/community-operators-pd8p2" Nov 22 11:15:37 crc kubenswrapper[4926]: I1122 11:15:37.489579 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/12bd2170-e028-4c5c-bdc8-838e968e47fa-utilities\") pod \"community-operators-pd8p2\" (UID: \"12bd2170-e028-4c5c-bdc8-838e968e47fa\") " pod="openshift-marketplace/community-operators-pd8p2" Nov 22 11:15:37 crc kubenswrapper[4926]: I1122 11:15:37.489616 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/12bd2170-e028-4c5c-bdc8-838e968e47fa-catalog-content\") pod \"community-operators-pd8p2\" (UID: \"12bd2170-e028-4c5c-bdc8-838e968e47fa\") " pod="openshift-marketplace/community-operators-pd8p2" Nov 22 11:15:37 crc kubenswrapper[4926]: I1122 11:15:37.509440 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8wdds\" (UniqueName: \"kubernetes.io/projected/12bd2170-e028-4c5c-bdc8-838e968e47fa-kube-api-access-8wdds\") pod \"community-operators-pd8p2\" (UID: \"12bd2170-e028-4c5c-bdc8-838e968e47fa\") " pod="openshift-marketplace/community-operators-pd8p2" Nov 22 11:15:37 crc kubenswrapper[4926]: I1122 11:15:37.638686 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-pd8p2" Nov 22 11:15:38 crc kubenswrapper[4926]: I1122 11:15:38.159008 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-pd8p2"] Nov 22 11:15:39 crc kubenswrapper[4926]: I1122 11:15:39.015753 4926 generic.go:334] "Generic (PLEG): container finished" podID="12bd2170-e028-4c5c-bdc8-838e968e47fa" containerID="eaf2b13fd68e4fb9ccae996227f0fbd1ecc8ee3e9ec99fa4af7017fab0d35563" exitCode=0 Nov 22 11:15:39 crc kubenswrapper[4926]: I1122 11:15:39.015821 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-pd8p2" event={"ID":"12bd2170-e028-4c5c-bdc8-838e968e47fa","Type":"ContainerDied","Data":"eaf2b13fd68e4fb9ccae996227f0fbd1ecc8ee3e9ec99fa4af7017fab0d35563"} Nov 22 11:15:39 crc kubenswrapper[4926]: I1122 11:15:39.016247 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-pd8p2" event={"ID":"12bd2170-e028-4c5c-bdc8-838e968e47fa","Type":"ContainerStarted","Data":"ae6b18d0c8bd4267ea6638d942b0ada97b728e2027e0761c83fc8328882706b4"} Nov 22 11:15:40 crc kubenswrapper[4926]: I1122 11:15:40.025704 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-pd8p2" event={"ID":"12bd2170-e028-4c5c-bdc8-838e968e47fa","Type":"ContainerStarted","Data":"92d67cb42889bd44d0ca4bdb5e57dd3d07ab83b941141d53d2c91b9c1d70a16b"} Nov 22 11:15:41 crc kubenswrapper[4926]: I1122 11:15:41.039354 4926 generic.go:334] "Generic (PLEG): container finished" podID="12bd2170-e028-4c5c-bdc8-838e968e47fa" containerID="92d67cb42889bd44d0ca4bdb5e57dd3d07ab83b941141d53d2c91b9c1d70a16b" exitCode=0 Nov 22 11:15:41 crc kubenswrapper[4926]: I1122 11:15:41.039605 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-pd8p2" event={"ID":"12bd2170-e028-4c5c-bdc8-838e968e47fa","Type":"ContainerDied","Data":"92d67cb42889bd44d0ca4bdb5e57dd3d07ab83b941141d53d2c91b9c1d70a16b"} Nov 22 11:15:42 crc kubenswrapper[4926]: I1122 11:15:42.050342 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-pd8p2" event={"ID":"12bd2170-e028-4c5c-bdc8-838e968e47fa","Type":"ContainerStarted","Data":"d161de455746ffea592387bf917fba2c0c38bc71b4fd8d53acfd8128154fd01c"} Nov 22 11:15:42 crc kubenswrapper[4926]: I1122 11:15:42.069438 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-pd8p2" podStartSLOduration=2.669680691 podStartE2EDuration="5.069420809s" podCreationTimestamp="2025-11-22 11:15:37 +0000 UTC" firstStartedPulling="2025-11-22 11:15:39.019267381 +0000 UTC m=+2159.320872708" lastFinishedPulling="2025-11-22 11:15:41.419007539 +0000 UTC m=+2161.720612826" observedRunningTime="2025-11-22 11:15:42.068520453 +0000 UTC m=+2162.370125750" watchObservedRunningTime="2025-11-22 11:15:42.069420809 +0000 UTC m=+2162.371026096" Nov 22 11:15:47 crc kubenswrapper[4926]: I1122 11:15:47.639459 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-pd8p2" Nov 22 11:15:47 crc kubenswrapper[4926]: I1122 11:15:47.640431 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-pd8p2" Nov 22 11:15:47 crc kubenswrapper[4926]: I1122 11:15:47.688464 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" 
pod="openshift-marketplace/community-operators-pd8p2" Nov 22 11:15:48 crc kubenswrapper[4926]: I1122 11:15:48.148951 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-pd8p2" Nov 22 11:15:48 crc kubenswrapper[4926]: I1122 11:15:48.197509 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-pd8p2"] Nov 22 11:15:50 crc kubenswrapper[4926]: I1122 11:15:50.119396 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-pd8p2" podUID="12bd2170-e028-4c5c-bdc8-838e968e47fa" containerName="registry-server" containerID="cri-o://d161de455746ffea592387bf917fba2c0c38bc71b4fd8d53acfd8128154fd01c" gracePeriod=2 Nov 22 11:15:50 crc kubenswrapper[4926]: I1122 11:15:50.550412 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-pd8p2" Nov 22 11:15:50 crc kubenswrapper[4926]: I1122 11:15:50.673369 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8wdds\" (UniqueName: \"kubernetes.io/projected/12bd2170-e028-4c5c-bdc8-838e968e47fa-kube-api-access-8wdds\") pod \"12bd2170-e028-4c5c-bdc8-838e968e47fa\" (UID: \"12bd2170-e028-4c5c-bdc8-838e968e47fa\") " Nov 22 11:15:50 crc kubenswrapper[4926]: I1122 11:15:50.673535 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/12bd2170-e028-4c5c-bdc8-838e968e47fa-catalog-content\") pod \"12bd2170-e028-4c5c-bdc8-838e968e47fa\" (UID: \"12bd2170-e028-4c5c-bdc8-838e968e47fa\") " Nov 22 11:15:50 crc kubenswrapper[4926]: I1122 11:15:50.673583 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/12bd2170-e028-4c5c-bdc8-838e968e47fa-utilities\") pod \"12bd2170-e028-4c5c-bdc8-838e968e47fa\" (UID: \"12bd2170-e028-4c5c-bdc8-838e968e47fa\") " Nov 22 11:15:50 crc kubenswrapper[4926]: I1122 11:15:50.674503 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/12bd2170-e028-4c5c-bdc8-838e968e47fa-utilities" (OuterVolumeSpecName: "utilities") pod "12bd2170-e028-4c5c-bdc8-838e968e47fa" (UID: "12bd2170-e028-4c5c-bdc8-838e968e47fa"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 11:15:50 crc kubenswrapper[4926]: I1122 11:15:50.684598 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/12bd2170-e028-4c5c-bdc8-838e968e47fa-kube-api-access-8wdds" (OuterVolumeSpecName: "kube-api-access-8wdds") pod "12bd2170-e028-4c5c-bdc8-838e968e47fa" (UID: "12bd2170-e028-4c5c-bdc8-838e968e47fa"). InnerVolumeSpecName "kube-api-access-8wdds". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 11:15:50 crc kubenswrapper[4926]: I1122 11:15:50.775132 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8wdds\" (UniqueName: \"kubernetes.io/projected/12bd2170-e028-4c5c-bdc8-838e968e47fa-kube-api-access-8wdds\") on node \"crc\" DevicePath \"\"" Nov 22 11:15:50 crc kubenswrapper[4926]: I1122 11:15:50.775391 4926 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/12bd2170-e028-4c5c-bdc8-838e968e47fa-utilities\") on node \"crc\" DevicePath \"\"" Nov 22 11:15:51 crc kubenswrapper[4926]: I1122 11:15:51.098448 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/12bd2170-e028-4c5c-bdc8-838e968e47fa-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "12bd2170-e028-4c5c-bdc8-838e968e47fa" (UID: "12bd2170-e028-4c5c-bdc8-838e968e47fa"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 11:15:51 crc kubenswrapper[4926]: I1122 11:15:51.143226 4926 generic.go:334] "Generic (PLEG): container finished" podID="12bd2170-e028-4c5c-bdc8-838e968e47fa" containerID="d161de455746ffea592387bf917fba2c0c38bc71b4fd8d53acfd8128154fd01c" exitCode=0 Nov 22 11:15:51 crc kubenswrapper[4926]: I1122 11:15:51.143296 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-pd8p2" event={"ID":"12bd2170-e028-4c5c-bdc8-838e968e47fa","Type":"ContainerDied","Data":"d161de455746ffea592387bf917fba2c0c38bc71b4fd8d53acfd8128154fd01c"} Nov 22 11:15:51 crc kubenswrapper[4926]: I1122 11:15:51.143349 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-pd8p2" event={"ID":"12bd2170-e028-4c5c-bdc8-838e968e47fa","Type":"ContainerDied","Data":"ae6b18d0c8bd4267ea6638d942b0ada97b728e2027e0761c83fc8328882706b4"} Nov 22 11:15:51 crc kubenswrapper[4926]: I1122 11:15:51.143350 4926 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-pd8p2" Nov 22 11:15:51 crc kubenswrapper[4926]: I1122 11:15:51.143385 4926 scope.go:117] "RemoveContainer" containerID="d161de455746ffea592387bf917fba2c0c38bc71b4fd8d53acfd8128154fd01c" Nov 22 11:15:51 crc kubenswrapper[4926]: I1122 11:15:51.184974 4926 scope.go:117] "RemoveContainer" containerID="92d67cb42889bd44d0ca4bdb5e57dd3d07ab83b941141d53d2c91b9c1d70a16b" Nov 22 11:15:51 crc kubenswrapper[4926]: I1122 11:15:51.187508 4926 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/12bd2170-e028-4c5c-bdc8-838e968e47fa-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 22 11:15:51 crc kubenswrapper[4926]: I1122 11:15:51.202142 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-pd8p2"] Nov 22 11:15:51 crc kubenswrapper[4926]: I1122 11:15:51.212970 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-pd8p2"] Nov 22 11:15:51 crc kubenswrapper[4926]: I1122 11:15:51.221515 4926 scope.go:117] "RemoveContainer" containerID="eaf2b13fd68e4fb9ccae996227f0fbd1ecc8ee3e9ec99fa4af7017fab0d35563" Nov 22 11:15:51 crc kubenswrapper[4926]: I1122 11:15:51.252064 4926 scope.go:117] "RemoveContainer" containerID="d161de455746ffea592387bf917fba2c0c38bc71b4fd8d53acfd8128154fd01c" Nov 22 11:15:51 crc kubenswrapper[4926]: E1122 11:15:51.252589 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d161de455746ffea592387bf917fba2c0c38bc71b4fd8d53acfd8128154fd01c\": container with ID starting with d161de455746ffea592387bf917fba2c0c38bc71b4fd8d53acfd8128154fd01c not found: ID does not exist" containerID="d161de455746ffea592387bf917fba2c0c38bc71b4fd8d53acfd8128154fd01c" Nov 22 11:15:51 crc kubenswrapper[4926]: I1122 11:15:51.252635 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d161de455746ffea592387bf917fba2c0c38bc71b4fd8d53acfd8128154fd01c"} err="failed to get container status \"d161de455746ffea592387bf917fba2c0c38bc71b4fd8d53acfd8128154fd01c\": rpc error: code = NotFound desc = could not find container \"d161de455746ffea592387bf917fba2c0c38bc71b4fd8d53acfd8128154fd01c\": container with ID starting with d161de455746ffea592387bf917fba2c0c38bc71b4fd8d53acfd8128154fd01c not found: ID does not exist" Nov 22 11:15:51 crc kubenswrapper[4926]: I1122 11:15:51.252662 4926 scope.go:117] "RemoveContainer" containerID="92d67cb42889bd44d0ca4bdb5e57dd3d07ab83b941141d53d2c91b9c1d70a16b" Nov 22 11:15:51 crc kubenswrapper[4926]: E1122 11:15:51.253121 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"92d67cb42889bd44d0ca4bdb5e57dd3d07ab83b941141d53d2c91b9c1d70a16b\": container with ID starting with 92d67cb42889bd44d0ca4bdb5e57dd3d07ab83b941141d53d2c91b9c1d70a16b not found: ID does not exist" containerID="92d67cb42889bd44d0ca4bdb5e57dd3d07ab83b941141d53d2c91b9c1d70a16b" Nov 22 11:15:51 crc kubenswrapper[4926]: I1122 11:15:51.253145 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"92d67cb42889bd44d0ca4bdb5e57dd3d07ab83b941141d53d2c91b9c1d70a16b"} err="failed to get container status \"92d67cb42889bd44d0ca4bdb5e57dd3d07ab83b941141d53d2c91b9c1d70a16b\": rpc error: code = NotFound desc = could not find container 
\"92d67cb42889bd44d0ca4bdb5e57dd3d07ab83b941141d53d2c91b9c1d70a16b\": container with ID starting with 92d67cb42889bd44d0ca4bdb5e57dd3d07ab83b941141d53d2c91b9c1d70a16b not found: ID does not exist" Nov 22 11:15:51 crc kubenswrapper[4926]: I1122 11:15:51.253161 4926 scope.go:117] "RemoveContainer" containerID="eaf2b13fd68e4fb9ccae996227f0fbd1ecc8ee3e9ec99fa4af7017fab0d35563" Nov 22 11:15:51 crc kubenswrapper[4926]: E1122 11:15:51.253446 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"eaf2b13fd68e4fb9ccae996227f0fbd1ecc8ee3e9ec99fa4af7017fab0d35563\": container with ID starting with eaf2b13fd68e4fb9ccae996227f0fbd1ecc8ee3e9ec99fa4af7017fab0d35563 not found: ID does not exist" containerID="eaf2b13fd68e4fb9ccae996227f0fbd1ecc8ee3e9ec99fa4af7017fab0d35563" Nov 22 11:15:51 crc kubenswrapper[4926]: I1122 11:15:51.253482 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"eaf2b13fd68e4fb9ccae996227f0fbd1ecc8ee3e9ec99fa4af7017fab0d35563"} err="failed to get container status \"eaf2b13fd68e4fb9ccae996227f0fbd1ecc8ee3e9ec99fa4af7017fab0d35563\": rpc error: code = NotFound desc = could not find container \"eaf2b13fd68e4fb9ccae996227f0fbd1ecc8ee3e9ec99fa4af7017fab0d35563\": container with ID starting with eaf2b13fd68e4fb9ccae996227f0fbd1ecc8ee3e9ec99fa4af7017fab0d35563 not found: ID does not exist" Nov 22 11:15:52 crc kubenswrapper[4926]: I1122 11:15:52.593807 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="12bd2170-e028-4c5c-bdc8-838e968e47fa" path="/var/lib/kubelet/pods/12bd2170-e028-4c5c-bdc8-838e968e47fa/volumes" Nov 22 11:17:00 crc kubenswrapper[4926]: I1122 11:17:00.843998 4926 generic.go:334] "Generic (PLEG): container finished" podID="64e25bf4-8746-413f-a28b-264ddfb9feff" containerID="d8c40057d9358b2dacb270e23f66f4e7afba1f457dd107025069df86f57b5ef3" exitCode=0 Nov 22 11:17:00 crc kubenswrapper[4926]: I1122 11:17:00.844073 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-s4zsl" event={"ID":"64e25bf4-8746-413f-a28b-264ddfb9feff","Type":"ContainerDied","Data":"d8c40057d9358b2dacb270e23f66f4e7afba1f457dd107025069df86f57b5ef3"} Nov 22 11:17:02 crc kubenswrapper[4926]: I1122 11:17:02.294940 4926 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-s4zsl" Nov 22 11:17:02 crc kubenswrapper[4926]: I1122 11:17:02.399378 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/64e25bf4-8746-413f-a28b-264ddfb9feff-ssh-key\") pod \"64e25bf4-8746-413f-a28b-264ddfb9feff\" (UID: \"64e25bf4-8746-413f-a28b-264ddfb9feff\") " Nov 22 11:17:02 crc kubenswrapper[4926]: I1122 11:17:02.399422 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/64e25bf4-8746-413f-a28b-264ddfb9feff-libvirt-secret-0\") pod \"64e25bf4-8746-413f-a28b-264ddfb9feff\" (UID: \"64e25bf4-8746-413f-a28b-264ddfb9feff\") " Nov 22 11:17:02 crc kubenswrapper[4926]: I1122 11:17:02.399448 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/64e25bf4-8746-413f-a28b-264ddfb9feff-inventory\") pod \"64e25bf4-8746-413f-a28b-264ddfb9feff\" (UID: \"64e25bf4-8746-413f-a28b-264ddfb9feff\") " Nov 22 11:17:02 crc kubenswrapper[4926]: I1122 11:17:02.399473 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-699ct\" (UniqueName: \"kubernetes.io/projected/64e25bf4-8746-413f-a28b-264ddfb9feff-kube-api-access-699ct\") pod \"64e25bf4-8746-413f-a28b-264ddfb9feff\" (UID: \"64e25bf4-8746-413f-a28b-264ddfb9feff\") " Nov 22 11:17:02 crc kubenswrapper[4926]: I1122 11:17:02.399730 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/64e25bf4-8746-413f-a28b-264ddfb9feff-libvirt-combined-ca-bundle\") pod \"64e25bf4-8746-413f-a28b-264ddfb9feff\" (UID: \"64e25bf4-8746-413f-a28b-264ddfb9feff\") " Nov 22 11:17:02 crc kubenswrapper[4926]: I1122 11:17:02.406139 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/64e25bf4-8746-413f-a28b-264ddfb9feff-kube-api-access-699ct" (OuterVolumeSpecName: "kube-api-access-699ct") pod "64e25bf4-8746-413f-a28b-264ddfb9feff" (UID: "64e25bf4-8746-413f-a28b-264ddfb9feff"). InnerVolumeSpecName "kube-api-access-699ct". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 11:17:02 crc kubenswrapper[4926]: I1122 11:17:02.406235 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/64e25bf4-8746-413f-a28b-264ddfb9feff-libvirt-combined-ca-bundle" (OuterVolumeSpecName: "libvirt-combined-ca-bundle") pod "64e25bf4-8746-413f-a28b-264ddfb9feff" (UID: "64e25bf4-8746-413f-a28b-264ddfb9feff"). InnerVolumeSpecName "libvirt-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 11:17:02 crc kubenswrapper[4926]: I1122 11:17:02.430458 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/64e25bf4-8746-413f-a28b-264ddfb9feff-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "64e25bf4-8746-413f-a28b-264ddfb9feff" (UID: "64e25bf4-8746-413f-a28b-264ddfb9feff"). InnerVolumeSpecName "ssh-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 11:17:02 crc kubenswrapper[4926]: I1122 11:17:02.436825 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/64e25bf4-8746-413f-a28b-264ddfb9feff-libvirt-secret-0" (OuterVolumeSpecName: "libvirt-secret-0") pod "64e25bf4-8746-413f-a28b-264ddfb9feff" (UID: "64e25bf4-8746-413f-a28b-264ddfb9feff"). InnerVolumeSpecName "libvirt-secret-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 11:17:02 crc kubenswrapper[4926]: I1122 11:17:02.439692 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/64e25bf4-8746-413f-a28b-264ddfb9feff-inventory" (OuterVolumeSpecName: "inventory") pod "64e25bf4-8746-413f-a28b-264ddfb9feff" (UID: "64e25bf4-8746-413f-a28b-264ddfb9feff"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 11:17:02 crc kubenswrapper[4926]: I1122 11:17:02.502344 4926 reconciler_common.go:293] "Volume detached for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/64e25bf4-8746-413f-a28b-264ddfb9feff-libvirt-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 22 11:17:02 crc kubenswrapper[4926]: I1122 11:17:02.502385 4926 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/64e25bf4-8746-413f-a28b-264ddfb9feff-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 22 11:17:02 crc kubenswrapper[4926]: I1122 11:17:02.502403 4926 reconciler_common.go:293] "Volume detached for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/64e25bf4-8746-413f-a28b-264ddfb9feff-libvirt-secret-0\") on node \"crc\" DevicePath \"\"" Nov 22 11:17:02 crc kubenswrapper[4926]: I1122 11:17:02.502415 4926 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/64e25bf4-8746-413f-a28b-264ddfb9feff-inventory\") on node \"crc\" DevicePath \"\"" Nov 22 11:17:02 crc kubenswrapper[4926]: I1122 11:17:02.502427 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-699ct\" (UniqueName: \"kubernetes.io/projected/64e25bf4-8746-413f-a28b-264ddfb9feff-kube-api-access-699ct\") on node \"crc\" DevicePath \"\"" Nov 22 11:17:02 crc kubenswrapper[4926]: I1122 11:17:02.865200 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-s4zsl" event={"ID":"64e25bf4-8746-413f-a28b-264ddfb9feff","Type":"ContainerDied","Data":"53364ac6f03769d520382c20c457f78d697523d2cd9d19045b571662fae8021a"} Nov 22 11:17:02 crc kubenswrapper[4926]: I1122 11:17:02.865262 4926 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="53364ac6f03769d520382c20c457f78d697523d2cd9d19045b571662fae8021a" Nov 22 11:17:02 crc kubenswrapper[4926]: I1122 11:17:02.865290 4926 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-s4zsl" Nov 22 11:17:02 crc kubenswrapper[4926]: I1122 11:17:02.954015 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-edpm-deployment-openstack-edpm-ipam-554th"] Nov 22 11:17:02 crc kubenswrapper[4926]: E1122 11:17:02.954586 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="12bd2170-e028-4c5c-bdc8-838e968e47fa" containerName="extract-content" Nov 22 11:17:02 crc kubenswrapper[4926]: I1122 11:17:02.954609 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="12bd2170-e028-4c5c-bdc8-838e968e47fa" containerName="extract-content" Nov 22 11:17:02 crc kubenswrapper[4926]: E1122 11:17:02.954639 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="12bd2170-e028-4c5c-bdc8-838e968e47fa" containerName="extract-utilities" Nov 22 11:17:02 crc kubenswrapper[4926]: I1122 11:17:02.954648 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="12bd2170-e028-4c5c-bdc8-838e968e47fa" containerName="extract-utilities" Nov 22 11:17:02 crc kubenswrapper[4926]: E1122 11:17:02.954666 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="64e25bf4-8746-413f-a28b-264ddfb9feff" containerName="libvirt-edpm-deployment-openstack-edpm-ipam" Nov 22 11:17:02 crc kubenswrapper[4926]: I1122 11:17:02.954676 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="64e25bf4-8746-413f-a28b-264ddfb9feff" containerName="libvirt-edpm-deployment-openstack-edpm-ipam" Nov 22 11:17:02 crc kubenswrapper[4926]: E1122 11:17:02.954690 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="12bd2170-e028-4c5c-bdc8-838e968e47fa" containerName="registry-server" Nov 22 11:17:02 crc kubenswrapper[4926]: I1122 11:17:02.954697 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="12bd2170-e028-4c5c-bdc8-838e968e47fa" containerName="registry-server" Nov 22 11:17:02 crc kubenswrapper[4926]: I1122 11:17:02.954909 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="64e25bf4-8746-413f-a28b-264ddfb9feff" containerName="libvirt-edpm-deployment-openstack-edpm-ipam" Nov 22 11:17:02 crc kubenswrapper[4926]: I1122 11:17:02.954924 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="12bd2170-e028-4c5c-bdc8-838e968e47fa" containerName="registry-server" Nov 22 11:17:02 crc kubenswrapper[4926]: I1122 11:17:02.955556 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-554th" Nov 22 11:17:02 crc kubenswrapper[4926]: I1122 11:17:02.957966 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 22 11:17:02 crc kubenswrapper[4926]: I1122 11:17:02.957983 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 22 11:17:02 crc kubenswrapper[4926]: I1122 11:17:02.959249 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-migration-ssh-key" Nov 22 11:17:02 crc kubenswrapper[4926]: I1122 11:17:02.959298 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"nova-extra-config" Nov 22 11:17:02 crc kubenswrapper[4926]: I1122 11:17:02.959919 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 22 11:17:02 crc kubenswrapper[4926]: I1122 11:17:02.960649 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-lscnm" Nov 22 11:17:02 crc kubenswrapper[4926]: I1122 11:17:02.961781 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-compute-config" Nov 22 11:17:02 crc kubenswrapper[4926]: I1122 11:17:02.971977 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-edpm-deployment-openstack-edpm-ipam-554th"] Nov 22 11:17:03 crc kubenswrapper[4926]: I1122 11:17:03.113547 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w7pf5\" (UniqueName: \"kubernetes.io/projected/61e72bc5-b152-4df1-95ee-bb47a81514ff-kube-api-access-w7pf5\") pod \"nova-edpm-deployment-openstack-edpm-ipam-554th\" (UID: \"61e72bc5-b152-4df1-95ee-bb47a81514ff\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-554th" Nov 22 11:17:03 crc kubenswrapper[4926]: I1122 11:17:03.113594 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/61e72bc5-b152-4df1-95ee-bb47a81514ff-ssh-key\") pod \"nova-edpm-deployment-openstack-edpm-ipam-554th\" (UID: \"61e72bc5-b152-4df1-95ee-bb47a81514ff\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-554th" Nov 22 11:17:03 crc kubenswrapper[4926]: I1122 11:17:03.113639 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/61e72bc5-b152-4df1-95ee-bb47a81514ff-nova-migration-ssh-key-1\") pod \"nova-edpm-deployment-openstack-edpm-ipam-554th\" (UID: \"61e72bc5-b152-4df1-95ee-bb47a81514ff\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-554th" Nov 22 11:17:03 crc kubenswrapper[4926]: I1122 11:17:03.113679 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/61e72bc5-b152-4df1-95ee-bb47a81514ff-inventory\") pod \"nova-edpm-deployment-openstack-edpm-ipam-554th\" (UID: \"61e72bc5-b152-4df1-95ee-bb47a81514ff\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-554th" Nov 22 11:17:03 crc kubenswrapper[4926]: I1122 11:17:03.113707 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-extra-config-0\" (UniqueName: \"kubernetes.io/configmap/61e72bc5-b152-4df1-95ee-bb47a81514ff-nova-extra-config-0\") 
pod \"nova-edpm-deployment-openstack-edpm-ipam-554th\" (UID: \"61e72bc5-b152-4df1-95ee-bb47a81514ff\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-554th" Nov 22 11:17:03 crc kubenswrapper[4926]: I1122 11:17:03.114032 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/61e72bc5-b152-4df1-95ee-bb47a81514ff-nova-migration-ssh-key-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-554th\" (UID: \"61e72bc5-b152-4df1-95ee-bb47a81514ff\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-554th" Nov 22 11:17:03 crc kubenswrapper[4926]: I1122 11:17:03.114114 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/61e72bc5-b152-4df1-95ee-bb47a81514ff-nova-combined-ca-bundle\") pod \"nova-edpm-deployment-openstack-edpm-ipam-554th\" (UID: \"61e72bc5-b152-4df1-95ee-bb47a81514ff\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-554th" Nov 22 11:17:03 crc kubenswrapper[4926]: I1122 11:17:03.114184 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/61e72bc5-b152-4df1-95ee-bb47a81514ff-nova-cell1-compute-config-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-554th\" (UID: \"61e72bc5-b152-4df1-95ee-bb47a81514ff\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-554th" Nov 22 11:17:03 crc kubenswrapper[4926]: I1122 11:17:03.114232 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/61e72bc5-b152-4df1-95ee-bb47a81514ff-nova-cell1-compute-config-1\") pod \"nova-edpm-deployment-openstack-edpm-ipam-554th\" (UID: \"61e72bc5-b152-4df1-95ee-bb47a81514ff\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-554th" Nov 22 11:17:03 crc kubenswrapper[4926]: I1122 11:17:03.217101 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/61e72bc5-b152-4df1-95ee-bb47a81514ff-nova-migration-ssh-key-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-554th\" (UID: \"61e72bc5-b152-4df1-95ee-bb47a81514ff\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-554th" Nov 22 11:17:03 crc kubenswrapper[4926]: I1122 11:17:03.217211 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/61e72bc5-b152-4df1-95ee-bb47a81514ff-nova-combined-ca-bundle\") pod \"nova-edpm-deployment-openstack-edpm-ipam-554th\" (UID: \"61e72bc5-b152-4df1-95ee-bb47a81514ff\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-554th" Nov 22 11:17:03 crc kubenswrapper[4926]: I1122 11:17:03.217262 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/61e72bc5-b152-4df1-95ee-bb47a81514ff-nova-cell1-compute-config-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-554th\" (UID: \"61e72bc5-b152-4df1-95ee-bb47a81514ff\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-554th" Nov 22 11:17:03 crc kubenswrapper[4926]: I1122 11:17:03.217350 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-cell1-compute-config-1\" (UniqueName: 
\"kubernetes.io/secret/61e72bc5-b152-4df1-95ee-bb47a81514ff-nova-cell1-compute-config-1\") pod \"nova-edpm-deployment-openstack-edpm-ipam-554th\" (UID: \"61e72bc5-b152-4df1-95ee-bb47a81514ff\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-554th" Nov 22 11:17:03 crc kubenswrapper[4926]: I1122 11:17:03.217416 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w7pf5\" (UniqueName: \"kubernetes.io/projected/61e72bc5-b152-4df1-95ee-bb47a81514ff-kube-api-access-w7pf5\") pod \"nova-edpm-deployment-openstack-edpm-ipam-554th\" (UID: \"61e72bc5-b152-4df1-95ee-bb47a81514ff\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-554th" Nov 22 11:17:03 crc kubenswrapper[4926]: I1122 11:17:03.217460 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/61e72bc5-b152-4df1-95ee-bb47a81514ff-ssh-key\") pod \"nova-edpm-deployment-openstack-edpm-ipam-554th\" (UID: \"61e72bc5-b152-4df1-95ee-bb47a81514ff\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-554th" Nov 22 11:17:03 crc kubenswrapper[4926]: I1122 11:17:03.217495 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/61e72bc5-b152-4df1-95ee-bb47a81514ff-nova-migration-ssh-key-1\") pod \"nova-edpm-deployment-openstack-edpm-ipam-554th\" (UID: \"61e72bc5-b152-4df1-95ee-bb47a81514ff\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-554th" Nov 22 11:17:03 crc kubenswrapper[4926]: I1122 11:17:03.217534 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/61e72bc5-b152-4df1-95ee-bb47a81514ff-inventory\") pod \"nova-edpm-deployment-openstack-edpm-ipam-554th\" (UID: \"61e72bc5-b152-4df1-95ee-bb47a81514ff\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-554th" Nov 22 11:17:03 crc kubenswrapper[4926]: I1122 11:17:03.217564 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-extra-config-0\" (UniqueName: \"kubernetes.io/configmap/61e72bc5-b152-4df1-95ee-bb47a81514ff-nova-extra-config-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-554th\" (UID: \"61e72bc5-b152-4df1-95ee-bb47a81514ff\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-554th" Nov 22 11:17:03 crc kubenswrapper[4926]: I1122 11:17:03.218429 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-extra-config-0\" (UniqueName: \"kubernetes.io/configmap/61e72bc5-b152-4df1-95ee-bb47a81514ff-nova-extra-config-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-554th\" (UID: \"61e72bc5-b152-4df1-95ee-bb47a81514ff\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-554th" Nov 22 11:17:03 crc kubenswrapper[4926]: I1122 11:17:03.222297 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/61e72bc5-b152-4df1-95ee-bb47a81514ff-nova-migration-ssh-key-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-554th\" (UID: \"61e72bc5-b152-4df1-95ee-bb47a81514ff\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-554th" Nov 22 11:17:03 crc kubenswrapper[4926]: I1122 11:17:03.222546 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/61e72bc5-b152-4df1-95ee-bb47a81514ff-ssh-key\") pod \"nova-edpm-deployment-openstack-edpm-ipam-554th\" (UID: 
\"61e72bc5-b152-4df1-95ee-bb47a81514ff\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-554th" Nov 22 11:17:03 crc kubenswrapper[4926]: I1122 11:17:03.222796 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/61e72bc5-b152-4df1-95ee-bb47a81514ff-inventory\") pod \"nova-edpm-deployment-openstack-edpm-ipam-554th\" (UID: \"61e72bc5-b152-4df1-95ee-bb47a81514ff\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-554th" Nov 22 11:17:03 crc kubenswrapper[4926]: I1122 11:17:03.223731 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/61e72bc5-b152-4df1-95ee-bb47a81514ff-nova-combined-ca-bundle\") pod \"nova-edpm-deployment-openstack-edpm-ipam-554th\" (UID: \"61e72bc5-b152-4df1-95ee-bb47a81514ff\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-554th" Nov 22 11:17:03 crc kubenswrapper[4926]: I1122 11:17:03.223815 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/61e72bc5-b152-4df1-95ee-bb47a81514ff-nova-migration-ssh-key-1\") pod \"nova-edpm-deployment-openstack-edpm-ipam-554th\" (UID: \"61e72bc5-b152-4df1-95ee-bb47a81514ff\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-554th" Nov 22 11:17:03 crc kubenswrapper[4926]: I1122 11:17:03.230534 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/61e72bc5-b152-4df1-95ee-bb47a81514ff-nova-cell1-compute-config-1\") pod \"nova-edpm-deployment-openstack-edpm-ipam-554th\" (UID: \"61e72bc5-b152-4df1-95ee-bb47a81514ff\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-554th" Nov 22 11:17:03 crc kubenswrapper[4926]: I1122 11:17:03.231781 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/61e72bc5-b152-4df1-95ee-bb47a81514ff-nova-cell1-compute-config-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-554th\" (UID: \"61e72bc5-b152-4df1-95ee-bb47a81514ff\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-554th" Nov 22 11:17:03 crc kubenswrapper[4926]: I1122 11:17:03.240519 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-w7pf5\" (UniqueName: \"kubernetes.io/projected/61e72bc5-b152-4df1-95ee-bb47a81514ff-kube-api-access-w7pf5\") pod \"nova-edpm-deployment-openstack-edpm-ipam-554th\" (UID: \"61e72bc5-b152-4df1-95ee-bb47a81514ff\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-554th" Nov 22 11:17:03 crc kubenswrapper[4926]: I1122 11:17:03.271717 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-554th" Nov 22 11:17:03 crc kubenswrapper[4926]: I1122 11:17:03.778572 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-edpm-deployment-openstack-edpm-ipam-554th"] Nov 22 11:17:03 crc kubenswrapper[4926]: I1122 11:17:03.785255 4926 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 22 11:17:03 crc kubenswrapper[4926]: I1122 11:17:03.872954 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-554th" event={"ID":"61e72bc5-b152-4df1-95ee-bb47a81514ff","Type":"ContainerStarted","Data":"60f4703b359bfb483d094fdd5e80fc513f1865e03c3036a23e2e7e6f1ad3339d"} Nov 22 11:17:04 crc kubenswrapper[4926]: I1122 11:17:04.883991 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-554th" event={"ID":"61e72bc5-b152-4df1-95ee-bb47a81514ff","Type":"ContainerStarted","Data":"64763dad4051534ff9d0a25c66d73d265dbaebc2ca0c861bb48194790493a4c5"} Nov 22 11:17:04 crc kubenswrapper[4926]: I1122 11:17:04.907179 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-554th" podStartSLOduration=2.427352731 podStartE2EDuration="2.907160137s" podCreationTimestamp="2025-11-22 11:17:02 +0000 UTC" firstStartedPulling="2025-11-22 11:17:03.784952941 +0000 UTC m=+2244.086558238" lastFinishedPulling="2025-11-22 11:17:04.264760357 +0000 UTC m=+2244.566365644" observedRunningTime="2025-11-22 11:17:04.900922069 +0000 UTC m=+2245.202527366" watchObservedRunningTime="2025-11-22 11:17:04.907160137 +0000 UTC m=+2245.208765424" Nov 22 11:17:09 crc kubenswrapper[4926]: I1122 11:17:09.660970 4926 patch_prober.go:28] interesting pod/machine-config-daemon-xr9nd container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 22 11:17:09 crc kubenswrapper[4926]: I1122 11:17:09.661682 4926 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" podUID="d4977b14-85c3-4141-9b15-1768f09e8d27" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 22 11:17:39 crc kubenswrapper[4926]: I1122 11:17:39.661436 4926 patch_prober.go:28] interesting pod/machine-config-daemon-xr9nd container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 22 11:17:39 crc kubenswrapper[4926]: I1122 11:17:39.662059 4926 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" podUID="d4977b14-85c3-4141-9b15-1768f09e8d27" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 22 11:18:09 crc kubenswrapper[4926]: I1122 11:18:09.660796 4926 patch_prober.go:28] interesting pod/machine-config-daemon-xr9nd container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: 
connect: connection refused" start-of-body= Nov 22 11:18:09 crc kubenswrapper[4926]: I1122 11:18:09.661482 4926 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" podUID="d4977b14-85c3-4141-9b15-1768f09e8d27" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 22 11:18:09 crc kubenswrapper[4926]: I1122 11:18:09.661547 4926 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" Nov 22 11:18:09 crc kubenswrapper[4926]: I1122 11:18:09.662512 4926 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"2e3f235c8fb6d7bb8e4f924fdc1e4071eb0ee0c8a090058bfe231e5c577e4544"} pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 22 11:18:09 crc kubenswrapper[4926]: I1122 11:18:09.662586 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" podUID="d4977b14-85c3-4141-9b15-1768f09e8d27" containerName="machine-config-daemon" containerID="cri-o://2e3f235c8fb6d7bb8e4f924fdc1e4071eb0ee0c8a090058bfe231e5c577e4544" gracePeriod=600 Nov 22 11:18:09 crc kubenswrapper[4926]: E1122 11:18:09.793569 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xr9nd_openshift-machine-config-operator(d4977b14-85c3-4141-9b15-1768f09e8d27)\"" pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" podUID="d4977b14-85c3-4141-9b15-1768f09e8d27" Nov 22 11:18:10 crc kubenswrapper[4926]: I1122 11:18:10.502487 4926 generic.go:334] "Generic (PLEG): container finished" podID="d4977b14-85c3-4141-9b15-1768f09e8d27" containerID="2e3f235c8fb6d7bb8e4f924fdc1e4071eb0ee0c8a090058bfe231e5c577e4544" exitCode=0 Nov 22 11:18:10 crc kubenswrapper[4926]: I1122 11:18:10.502530 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" event={"ID":"d4977b14-85c3-4141-9b15-1768f09e8d27","Type":"ContainerDied","Data":"2e3f235c8fb6d7bb8e4f924fdc1e4071eb0ee0c8a090058bfe231e5c577e4544"} Nov 22 11:18:10 crc kubenswrapper[4926]: I1122 11:18:10.502944 4926 scope.go:117] "RemoveContainer" containerID="d0b2e230e48b02d7f7eb51b7f5997a9106b1cc69fcc4aca4be72bfa73cbbac6b" Nov 22 11:18:10 crc kubenswrapper[4926]: I1122 11:18:10.503509 4926 scope.go:117] "RemoveContainer" containerID="2e3f235c8fb6d7bb8e4f924fdc1e4071eb0ee0c8a090058bfe231e5c577e4544" Nov 22 11:18:10 crc kubenswrapper[4926]: E1122 11:18:10.503798 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xr9nd_openshift-machine-config-operator(d4977b14-85c3-4141-9b15-1768f09e8d27)\"" pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" podUID="d4977b14-85c3-4141-9b15-1768f09e8d27" Nov 22 11:18:22 crc kubenswrapper[4926]: I1122 11:18:22.582331 4926 scope.go:117] "RemoveContainer" 
containerID="2e3f235c8fb6d7bb8e4f924fdc1e4071eb0ee0c8a090058bfe231e5c577e4544" Nov 22 11:18:22 crc kubenswrapper[4926]: E1122 11:18:22.583060 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xr9nd_openshift-machine-config-operator(d4977b14-85c3-4141-9b15-1768f09e8d27)\"" pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" podUID="d4977b14-85c3-4141-9b15-1768f09e8d27" Nov 22 11:18:33 crc kubenswrapper[4926]: I1122 11:18:33.582578 4926 scope.go:117] "RemoveContainer" containerID="2e3f235c8fb6d7bb8e4f924fdc1e4071eb0ee0c8a090058bfe231e5c577e4544" Nov 22 11:18:33 crc kubenswrapper[4926]: E1122 11:18:33.583310 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xr9nd_openshift-machine-config-operator(d4977b14-85c3-4141-9b15-1768f09e8d27)\"" pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" podUID="d4977b14-85c3-4141-9b15-1768f09e8d27" Nov 22 11:18:44 crc kubenswrapper[4926]: I1122 11:18:44.581971 4926 scope.go:117] "RemoveContainer" containerID="2e3f235c8fb6d7bb8e4f924fdc1e4071eb0ee0c8a090058bfe231e5c577e4544" Nov 22 11:18:44 crc kubenswrapper[4926]: E1122 11:18:44.582681 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xr9nd_openshift-machine-config-operator(d4977b14-85c3-4141-9b15-1768f09e8d27)\"" pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" podUID="d4977b14-85c3-4141-9b15-1768f09e8d27" Nov 22 11:18:59 crc kubenswrapper[4926]: I1122 11:18:59.583072 4926 scope.go:117] "RemoveContainer" containerID="2e3f235c8fb6d7bb8e4f924fdc1e4071eb0ee0c8a090058bfe231e5c577e4544" Nov 22 11:18:59 crc kubenswrapper[4926]: E1122 11:18:59.586083 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xr9nd_openshift-machine-config-operator(d4977b14-85c3-4141-9b15-1768f09e8d27)\"" pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" podUID="d4977b14-85c3-4141-9b15-1768f09e8d27" Nov 22 11:19:10 crc kubenswrapper[4926]: I1122 11:19:10.089498 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-pxw49"] Nov 22 11:19:10 crc kubenswrapper[4926]: I1122 11:19:10.092498 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-pxw49" Nov 22 11:19:10 crc kubenswrapper[4926]: I1122 11:19:10.103475 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-pxw49"] Nov 22 11:19:10 crc kubenswrapper[4926]: I1122 11:19:10.245151 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8f0794c1-ea86-4f14-88f4-d9b34b2d5e6e-utilities\") pod \"redhat-marketplace-pxw49\" (UID: \"8f0794c1-ea86-4f14-88f4-d9b34b2d5e6e\") " pod="openshift-marketplace/redhat-marketplace-pxw49" Nov 22 11:19:10 crc kubenswrapper[4926]: I1122 11:19:10.245305 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jdqrx\" (UniqueName: \"kubernetes.io/projected/8f0794c1-ea86-4f14-88f4-d9b34b2d5e6e-kube-api-access-jdqrx\") pod \"redhat-marketplace-pxw49\" (UID: \"8f0794c1-ea86-4f14-88f4-d9b34b2d5e6e\") " pod="openshift-marketplace/redhat-marketplace-pxw49" Nov 22 11:19:10 crc kubenswrapper[4926]: I1122 11:19:10.245423 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8f0794c1-ea86-4f14-88f4-d9b34b2d5e6e-catalog-content\") pod \"redhat-marketplace-pxw49\" (UID: \"8f0794c1-ea86-4f14-88f4-d9b34b2d5e6e\") " pod="openshift-marketplace/redhat-marketplace-pxw49" Nov 22 11:19:10 crc kubenswrapper[4926]: I1122 11:19:10.347520 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8f0794c1-ea86-4f14-88f4-d9b34b2d5e6e-catalog-content\") pod \"redhat-marketplace-pxw49\" (UID: \"8f0794c1-ea86-4f14-88f4-d9b34b2d5e6e\") " pod="openshift-marketplace/redhat-marketplace-pxw49" Nov 22 11:19:10 crc kubenswrapper[4926]: I1122 11:19:10.347666 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8f0794c1-ea86-4f14-88f4-d9b34b2d5e6e-utilities\") pod \"redhat-marketplace-pxw49\" (UID: \"8f0794c1-ea86-4f14-88f4-d9b34b2d5e6e\") " pod="openshift-marketplace/redhat-marketplace-pxw49" Nov 22 11:19:10 crc kubenswrapper[4926]: I1122 11:19:10.347765 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jdqrx\" (UniqueName: \"kubernetes.io/projected/8f0794c1-ea86-4f14-88f4-d9b34b2d5e6e-kube-api-access-jdqrx\") pod \"redhat-marketplace-pxw49\" (UID: \"8f0794c1-ea86-4f14-88f4-d9b34b2d5e6e\") " pod="openshift-marketplace/redhat-marketplace-pxw49" Nov 22 11:19:10 crc kubenswrapper[4926]: I1122 11:19:10.348183 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8f0794c1-ea86-4f14-88f4-d9b34b2d5e6e-utilities\") pod \"redhat-marketplace-pxw49\" (UID: \"8f0794c1-ea86-4f14-88f4-d9b34b2d5e6e\") " pod="openshift-marketplace/redhat-marketplace-pxw49" Nov 22 11:19:10 crc kubenswrapper[4926]: I1122 11:19:10.348478 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8f0794c1-ea86-4f14-88f4-d9b34b2d5e6e-catalog-content\") pod \"redhat-marketplace-pxw49\" (UID: \"8f0794c1-ea86-4f14-88f4-d9b34b2d5e6e\") " pod="openshift-marketplace/redhat-marketplace-pxw49" Nov 22 11:19:10 crc kubenswrapper[4926]: I1122 11:19:10.375317 4926 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-jdqrx\" (UniqueName: \"kubernetes.io/projected/8f0794c1-ea86-4f14-88f4-d9b34b2d5e6e-kube-api-access-jdqrx\") pod \"redhat-marketplace-pxw49\" (UID: \"8f0794c1-ea86-4f14-88f4-d9b34b2d5e6e\") " pod="openshift-marketplace/redhat-marketplace-pxw49" Nov 22 11:19:10 crc kubenswrapper[4926]: I1122 11:19:10.420575 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-pxw49" Nov 22 11:19:10 crc kubenswrapper[4926]: I1122 11:19:10.954675 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-pxw49"] Nov 22 11:19:10 crc kubenswrapper[4926]: W1122 11:19:10.958055 4926 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod8f0794c1_ea86_4f14_88f4_d9b34b2d5e6e.slice/crio-c0fab8dc35bdd1a94c52fae6c742fa46806bd8d0d984d11d8676f2b27810ed3b WatchSource:0}: Error finding container c0fab8dc35bdd1a94c52fae6c742fa46806bd8d0d984d11d8676f2b27810ed3b: Status 404 returned error can't find the container with id c0fab8dc35bdd1a94c52fae6c742fa46806bd8d0d984d11d8676f2b27810ed3b Nov 22 11:19:11 crc kubenswrapper[4926]: I1122 11:19:11.084658 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-pxw49" event={"ID":"8f0794c1-ea86-4f14-88f4-d9b34b2d5e6e","Type":"ContainerStarted","Data":"c0fab8dc35bdd1a94c52fae6c742fa46806bd8d0d984d11d8676f2b27810ed3b"} Nov 22 11:19:11 crc kubenswrapper[4926]: I1122 11:19:11.583283 4926 scope.go:117] "RemoveContainer" containerID="2e3f235c8fb6d7bb8e4f924fdc1e4071eb0ee0c8a090058bfe231e5c577e4544" Nov 22 11:19:11 crc kubenswrapper[4926]: E1122 11:19:11.583860 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xr9nd_openshift-machine-config-operator(d4977b14-85c3-4141-9b15-1768f09e8d27)\"" pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" podUID="d4977b14-85c3-4141-9b15-1768f09e8d27" Nov 22 11:19:12 crc kubenswrapper[4926]: I1122 11:19:12.095288 4926 generic.go:334] "Generic (PLEG): container finished" podID="8f0794c1-ea86-4f14-88f4-d9b34b2d5e6e" containerID="170bda1f76ef12168ac46b9129ff0e3b8e6c64dd943f13e71ff64ee13355ad69" exitCode=0 Nov 22 11:19:12 crc kubenswrapper[4926]: I1122 11:19:12.095347 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-pxw49" event={"ID":"8f0794c1-ea86-4f14-88f4-d9b34b2d5e6e","Type":"ContainerDied","Data":"170bda1f76ef12168ac46b9129ff0e3b8e6c64dd943f13e71ff64ee13355ad69"} Nov 22 11:19:13 crc kubenswrapper[4926]: I1122 11:19:13.104068 4926 generic.go:334] "Generic (PLEG): container finished" podID="8f0794c1-ea86-4f14-88f4-d9b34b2d5e6e" containerID="67a57130355ff1007c3c7f274fc0757bf8d39716dbc138293bbbda978efda6b8" exitCode=0 Nov 22 11:19:13 crc kubenswrapper[4926]: I1122 11:19:13.104179 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-pxw49" event={"ID":"8f0794c1-ea86-4f14-88f4-d9b34b2d5e6e","Type":"ContainerDied","Data":"67a57130355ff1007c3c7f274fc0757bf8d39716dbc138293bbbda978efda6b8"} Nov 22 11:19:14 crc kubenswrapper[4926]: I1122 11:19:14.120349 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-pxw49" 
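The records above trace the kubelet's startup path for a catalog pod: the volume reconciler verifies attachment (reconciler_common.go:245), each volume is mounted (operation_generator.go:637 "MountVolume.SetUp succeeded"), a new sandbox is created (util.go:30), and PLEG then reports ContainerStarted/ContainerDied as the extract containers run. A minimal, hypothetical Go helper for timing that path from a log like this one is sketched below; the klog timestamp format and the pod="..." field are taken from the records above, the year (2025) is assumed because klog headers omit it, and the tool is not part of the kubelet.

// mount-to-start: reads a kubelet log on stdin and reports, per pod, the
// time from the first "MountVolume started" record to the first PLEG
// ContainerStarted event (which may be the sandbox itself).
package main

import (
	"bufio"
	"fmt"
	"os"
	"regexp"
	"time"
)

var (
	// klog header, e.g. I1122 11:19:10.347520 (level+MMDD, then time).
	tsRe      = regexp.MustCompile(`[IWE](\d{4}) (\d{2}:\d{2}:\d{2}\.\d{6})`)
	podRe     = regexp.MustCompile(`pod="([^"]+)"`)
	mountRe   = regexp.MustCompile(`operationExecutor\.MountVolume started`)
	startedRe = regexp.MustCompile(`"Type":"ContainerStarted"`)
)

func stamp(line string) (time.Time, bool) {
	m := tsRe.FindStringSubmatch(line)
	if m == nil {
		return time.Time{}, false
	}
	// The year is assumed; klog does not log it.
	t, err := time.Parse("20060102 15:04:05.000000", "2025"+m[1]+" "+m[2])
	return t, err == nil
}

func main() {
	firstMount := map[string]time.Time{}
	sc := bufio.NewScanner(os.Stdin)
	sc.Buffer(make([]byte, 0, 64*1024), 1024*1024) // records can be long
	for sc.Scan() {
		line := sc.Text()
		pm := podRe.FindStringSubmatch(line)
		if pm == nil {
			continue
		}
		t, ok := stamp(line)
		if !ok {
			continue
		}
		switch {
		case mountRe.MatchString(line):
			if _, seen := firstMount[pm[1]]; !seen {
				firstMount[pm[1]] = t
			}
		case startedRe.MatchString(line):
			if t0, seen := firstMount[pm[1]]; seen {
				fmt.Printf("%s: first mount -> first ContainerStarted: %v\n", pm[1], t.Sub(t0))
				delete(firstMount, pm[1])
			}
		}
	}
}

Run as "go run mounttiming.go < kubelet.log"; against the records above it should report roughly 737ms for redhat-marketplace-pxw49 (11:19:10.347520 to 11:19:11.084658).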
event={"ID":"8f0794c1-ea86-4f14-88f4-d9b34b2d5e6e","Type":"ContainerStarted","Data":"01562a5a3be52c72a46c86e28f6b3dce3568d6eea7c16fbd6ee9818aa43bbaf1"} Nov 22 11:19:14 crc kubenswrapper[4926]: I1122 11:19:14.138364 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-pxw49" podStartSLOduration=2.712095469 podStartE2EDuration="4.138345419s" podCreationTimestamp="2025-11-22 11:19:10 +0000 UTC" firstStartedPulling="2025-11-22 11:19:12.098271291 +0000 UTC m=+2372.399876588" lastFinishedPulling="2025-11-22 11:19:13.524521251 +0000 UTC m=+2373.826126538" observedRunningTime="2025-11-22 11:19:14.134354293 +0000 UTC m=+2374.435959590" watchObservedRunningTime="2025-11-22 11:19:14.138345419 +0000 UTC m=+2374.439950706" Nov 22 11:19:20 crc kubenswrapper[4926]: I1122 11:19:20.420991 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-pxw49" Nov 22 11:19:20 crc kubenswrapper[4926]: I1122 11:19:20.421536 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-pxw49" Nov 22 11:19:20 crc kubenswrapper[4926]: I1122 11:19:20.483710 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-pxw49" Nov 22 11:19:21 crc kubenswrapper[4926]: I1122 11:19:21.280641 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-pxw49" Nov 22 11:19:21 crc kubenswrapper[4926]: I1122 11:19:21.343286 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-pxw49"] Nov 22 11:19:23 crc kubenswrapper[4926]: I1122 11:19:23.214682 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-pxw49" podUID="8f0794c1-ea86-4f14-88f4-d9b34b2d5e6e" containerName="registry-server" containerID="cri-o://01562a5a3be52c72a46c86e28f6b3dce3568d6eea7c16fbd6ee9818aa43bbaf1" gracePeriod=2 Nov 22 11:19:23 crc kubenswrapper[4926]: I1122 11:19:23.582402 4926 scope.go:117] "RemoveContainer" containerID="2e3f235c8fb6d7bb8e4f924fdc1e4071eb0ee0c8a090058bfe231e5c577e4544" Nov 22 11:19:23 crc kubenswrapper[4926]: E1122 11:19:23.583207 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xr9nd_openshift-machine-config-operator(d4977b14-85c3-4141-9b15-1768f09e8d27)\"" pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" podUID="d4977b14-85c3-4141-9b15-1768f09e8d27" Nov 22 11:19:23 crc kubenswrapper[4926]: I1122 11:19:23.676039 4926 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-pxw49" Nov 22 11:19:23 crc kubenswrapper[4926]: I1122 11:19:23.705398 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8f0794c1-ea86-4f14-88f4-d9b34b2d5e6e-utilities\") pod \"8f0794c1-ea86-4f14-88f4-d9b34b2d5e6e\" (UID: \"8f0794c1-ea86-4f14-88f4-d9b34b2d5e6e\") " Nov 22 11:19:23 crc kubenswrapper[4926]: I1122 11:19:23.705491 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8f0794c1-ea86-4f14-88f4-d9b34b2d5e6e-catalog-content\") pod \"8f0794c1-ea86-4f14-88f4-d9b34b2d5e6e\" (UID: \"8f0794c1-ea86-4f14-88f4-d9b34b2d5e6e\") " Nov 22 11:19:23 crc kubenswrapper[4926]: I1122 11:19:23.705823 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jdqrx\" (UniqueName: \"kubernetes.io/projected/8f0794c1-ea86-4f14-88f4-d9b34b2d5e6e-kube-api-access-jdqrx\") pod \"8f0794c1-ea86-4f14-88f4-d9b34b2d5e6e\" (UID: \"8f0794c1-ea86-4f14-88f4-d9b34b2d5e6e\") " Nov 22 11:19:23 crc kubenswrapper[4926]: I1122 11:19:23.706570 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8f0794c1-ea86-4f14-88f4-d9b34b2d5e6e-utilities" (OuterVolumeSpecName: "utilities") pod "8f0794c1-ea86-4f14-88f4-d9b34b2d5e6e" (UID: "8f0794c1-ea86-4f14-88f4-d9b34b2d5e6e"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 11:19:23 crc kubenswrapper[4926]: I1122 11:19:23.720137 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f0794c1-ea86-4f14-88f4-d9b34b2d5e6e-kube-api-access-jdqrx" (OuterVolumeSpecName: "kube-api-access-jdqrx") pod "8f0794c1-ea86-4f14-88f4-d9b34b2d5e6e" (UID: "8f0794c1-ea86-4f14-88f4-d9b34b2d5e6e"). InnerVolumeSpecName "kube-api-access-jdqrx". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 11:19:23 crc kubenswrapper[4926]: I1122 11:19:23.723276 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8f0794c1-ea86-4f14-88f4-d9b34b2d5e6e-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "8f0794c1-ea86-4f14-88f4-d9b34b2d5e6e" (UID: "8f0794c1-ea86-4f14-88f4-d9b34b2d5e6e"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 11:19:23 crc kubenswrapper[4926]: I1122 11:19:23.808194 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jdqrx\" (UniqueName: \"kubernetes.io/projected/8f0794c1-ea86-4f14-88f4-d9b34b2d5e6e-kube-api-access-jdqrx\") on node \"crc\" DevicePath \"\"" Nov 22 11:19:23 crc kubenswrapper[4926]: I1122 11:19:23.808238 4926 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8f0794c1-ea86-4f14-88f4-d9b34b2d5e6e-utilities\") on node \"crc\" DevicePath \"\"" Nov 22 11:19:23 crc kubenswrapper[4926]: I1122 11:19:23.808251 4926 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8f0794c1-ea86-4f14-88f4-d9b34b2d5e6e-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 22 11:19:24 crc kubenswrapper[4926]: I1122 11:19:24.229720 4926 generic.go:334] "Generic (PLEG): container finished" podID="8f0794c1-ea86-4f14-88f4-d9b34b2d5e6e" containerID="01562a5a3be52c72a46c86e28f6b3dce3568d6eea7c16fbd6ee9818aa43bbaf1" exitCode=0 Nov 22 11:19:24 crc kubenswrapper[4926]: I1122 11:19:24.229793 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-pxw49" Nov 22 11:19:24 crc kubenswrapper[4926]: I1122 11:19:24.229786 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-pxw49" event={"ID":"8f0794c1-ea86-4f14-88f4-d9b34b2d5e6e","Type":"ContainerDied","Data":"01562a5a3be52c72a46c86e28f6b3dce3568d6eea7c16fbd6ee9818aa43bbaf1"} Nov 22 11:19:24 crc kubenswrapper[4926]: I1122 11:19:24.230270 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-pxw49" event={"ID":"8f0794c1-ea86-4f14-88f4-d9b34b2d5e6e","Type":"ContainerDied","Data":"c0fab8dc35bdd1a94c52fae6c742fa46806bd8d0d984d11d8676f2b27810ed3b"} Nov 22 11:19:24 crc kubenswrapper[4926]: I1122 11:19:24.230309 4926 scope.go:117] "RemoveContainer" containerID="01562a5a3be52c72a46c86e28f6b3dce3568d6eea7c16fbd6ee9818aa43bbaf1" Nov 22 11:19:24 crc kubenswrapper[4926]: I1122 11:19:24.268188 4926 scope.go:117] "RemoveContainer" containerID="67a57130355ff1007c3c7f274fc0757bf8d39716dbc138293bbbda978efda6b8" Nov 22 11:19:24 crc kubenswrapper[4926]: I1122 11:19:24.280804 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-pxw49"] Nov 22 11:19:24 crc kubenswrapper[4926]: I1122 11:19:24.293458 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-pxw49"] Nov 22 11:19:24 crc kubenswrapper[4926]: I1122 11:19:24.303661 4926 scope.go:117] "RemoveContainer" containerID="170bda1f76ef12168ac46b9129ff0e3b8e6c64dd943f13e71ff64ee13355ad69" Nov 22 11:19:24 crc kubenswrapper[4926]: I1122 11:19:24.364394 4926 scope.go:117] "RemoveContainer" containerID="01562a5a3be52c72a46c86e28f6b3dce3568d6eea7c16fbd6ee9818aa43bbaf1" Nov 22 11:19:24 crc kubenswrapper[4926]: E1122 11:19:24.364923 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"01562a5a3be52c72a46c86e28f6b3dce3568d6eea7c16fbd6ee9818aa43bbaf1\": container with ID starting with 01562a5a3be52c72a46c86e28f6b3dce3568d6eea7c16fbd6ee9818aa43bbaf1 not found: ID does not exist" containerID="01562a5a3be52c72a46c86e28f6b3dce3568d6eea7c16fbd6ee9818aa43bbaf1" Nov 22 11:19:24 crc kubenswrapper[4926]: I1122 11:19:24.365157 4926 
Nov 22 11:19:24 crc kubenswrapper[4926]: I1122 11:19:24.365157 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"01562a5a3be52c72a46c86e28f6b3dce3568d6eea7c16fbd6ee9818aa43bbaf1"} err="failed to get container status \"01562a5a3be52c72a46c86e28f6b3dce3568d6eea7c16fbd6ee9818aa43bbaf1\": rpc error: code = NotFound desc = could not find container \"01562a5a3be52c72a46c86e28f6b3dce3568d6eea7c16fbd6ee9818aa43bbaf1\": container with ID starting with 01562a5a3be52c72a46c86e28f6b3dce3568d6eea7c16fbd6ee9818aa43bbaf1 not found: ID does not exist"
Nov 22 11:19:24 crc kubenswrapper[4926]: I1122 11:19:24.365186 4926 scope.go:117] "RemoveContainer" containerID="67a57130355ff1007c3c7f274fc0757bf8d39716dbc138293bbbda978efda6b8"
Nov 22 11:19:24 crc kubenswrapper[4926]: E1122 11:19:24.365617 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"67a57130355ff1007c3c7f274fc0757bf8d39716dbc138293bbbda978efda6b8\": container with ID starting with 67a57130355ff1007c3c7f274fc0757bf8d39716dbc138293bbbda978efda6b8 not found: ID does not exist" containerID="67a57130355ff1007c3c7f274fc0757bf8d39716dbc138293bbbda978efda6b8"
Nov 22 11:19:24 crc kubenswrapper[4926]: I1122 11:19:24.365645 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"67a57130355ff1007c3c7f274fc0757bf8d39716dbc138293bbbda978efda6b8"} err="failed to get container status \"67a57130355ff1007c3c7f274fc0757bf8d39716dbc138293bbbda978efda6b8\": rpc error: code = NotFound desc = could not find container \"67a57130355ff1007c3c7f274fc0757bf8d39716dbc138293bbbda978efda6b8\": container with ID starting with 67a57130355ff1007c3c7f274fc0757bf8d39716dbc138293bbbda978efda6b8 not found: ID does not exist"
Nov 22 11:19:24 crc kubenswrapper[4926]: I1122 11:19:24.365663 4926 scope.go:117] "RemoveContainer" containerID="170bda1f76ef12168ac46b9129ff0e3b8e6c64dd943f13e71ff64ee13355ad69"
Nov 22 11:19:24 crc kubenswrapper[4926]: E1122 11:19:24.367365 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"170bda1f76ef12168ac46b9129ff0e3b8e6c64dd943f13e71ff64ee13355ad69\": container with ID starting with 170bda1f76ef12168ac46b9129ff0e3b8e6c64dd943f13e71ff64ee13355ad69 not found: ID does not exist" containerID="170bda1f76ef12168ac46b9129ff0e3b8e6c64dd943f13e71ff64ee13355ad69"
Nov 22 11:19:24 crc kubenswrapper[4926]: I1122 11:19:24.367424 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"170bda1f76ef12168ac46b9129ff0e3b8e6c64dd943f13e71ff64ee13355ad69"} err="failed to get container status \"170bda1f76ef12168ac46b9129ff0e3b8e6c64dd943f13e71ff64ee13355ad69\": rpc error: code = NotFound desc = could not find container \"170bda1f76ef12168ac46b9129ff0e3b8e6c64dd943f13e71ff64ee13355ad69\": container with ID starting with 170bda1f76ef12168ac46b9129ff0e3b8e6c64dd943f13e71ff64ee13355ad69 not found: ID does not exist"
Nov 22 11:19:24 crc kubenswrapper[4926]: I1122 11:19:24.594624 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8f0794c1-ea86-4f14-88f4-d9b34b2d5e6e" path="/var/lib/kubelet/pods/8f0794c1-ea86-4f14-88f4-d9b34b2d5e6e/volumes"
Nov 22 11:19:38 crc kubenswrapper[4926]: I1122 11:19:38.582959 4926 scope.go:117] "RemoveContainer" containerID="2e3f235c8fb6d7bb8e4f924fdc1e4071eb0ee0c8a090058bfe231e5c577e4544"
Nov 22 11:19:38 crc kubenswrapper[4926]: E1122 11:19:38.584281 4926 pod_workers.go:1301] "Error syncing pod, skipping"
err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xr9nd_openshift-machine-config-operator(d4977b14-85c3-4141-9b15-1768f09e8d27)\"" pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" podUID="d4977b14-85c3-4141-9b15-1768f09e8d27" Nov 22 11:19:45 crc kubenswrapper[4926]: I1122 11:19:45.433723 4926 generic.go:334] "Generic (PLEG): container finished" podID="61e72bc5-b152-4df1-95ee-bb47a81514ff" containerID="64763dad4051534ff9d0a25c66d73d265dbaebc2ca0c861bb48194790493a4c5" exitCode=0 Nov 22 11:19:45 crc kubenswrapper[4926]: I1122 11:19:45.433831 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-554th" event={"ID":"61e72bc5-b152-4df1-95ee-bb47a81514ff","Type":"ContainerDied","Data":"64763dad4051534ff9d0a25c66d73d265dbaebc2ca0c861bb48194790493a4c5"} Nov 22 11:19:46 crc kubenswrapper[4926]: I1122 11:19:46.957569 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-554th" Nov 22 11:19:47 crc kubenswrapper[4926]: I1122 11:19:47.067652 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/61e72bc5-b152-4df1-95ee-bb47a81514ff-nova-migration-ssh-key-0\") pod \"61e72bc5-b152-4df1-95ee-bb47a81514ff\" (UID: \"61e72bc5-b152-4df1-95ee-bb47a81514ff\") " Nov 22 11:19:47 crc kubenswrapper[4926]: I1122 11:19:47.067730 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w7pf5\" (UniqueName: \"kubernetes.io/projected/61e72bc5-b152-4df1-95ee-bb47a81514ff-kube-api-access-w7pf5\") pod \"61e72bc5-b152-4df1-95ee-bb47a81514ff\" (UID: \"61e72bc5-b152-4df1-95ee-bb47a81514ff\") " Nov 22 11:19:47 crc kubenswrapper[4926]: I1122 11:19:47.067783 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-extra-config-0\" (UniqueName: \"kubernetes.io/configmap/61e72bc5-b152-4df1-95ee-bb47a81514ff-nova-extra-config-0\") pod \"61e72bc5-b152-4df1-95ee-bb47a81514ff\" (UID: \"61e72bc5-b152-4df1-95ee-bb47a81514ff\") " Nov 22 11:19:47 crc kubenswrapper[4926]: I1122 11:19:47.067824 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/61e72bc5-b152-4df1-95ee-bb47a81514ff-ssh-key\") pod \"61e72bc5-b152-4df1-95ee-bb47a81514ff\" (UID: \"61e72bc5-b152-4df1-95ee-bb47a81514ff\") " Nov 22 11:19:47 crc kubenswrapper[4926]: I1122 11:19:47.067948 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/61e72bc5-b152-4df1-95ee-bb47a81514ff-nova-combined-ca-bundle\") pod \"61e72bc5-b152-4df1-95ee-bb47a81514ff\" (UID: \"61e72bc5-b152-4df1-95ee-bb47a81514ff\") " Nov 22 11:19:47 crc kubenswrapper[4926]: I1122 11:19:47.068058 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/61e72bc5-b152-4df1-95ee-bb47a81514ff-nova-cell1-compute-config-1\") pod \"61e72bc5-b152-4df1-95ee-bb47a81514ff\" (UID: \"61e72bc5-b152-4df1-95ee-bb47a81514ff\") " Nov 22 11:19:47 crc kubenswrapper[4926]: I1122 11:19:47.068098 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-cell1-compute-config-0\" (UniqueName: 
\"kubernetes.io/secret/61e72bc5-b152-4df1-95ee-bb47a81514ff-nova-cell1-compute-config-0\") pod \"61e72bc5-b152-4df1-95ee-bb47a81514ff\" (UID: \"61e72bc5-b152-4df1-95ee-bb47a81514ff\") " Nov 22 11:19:47 crc kubenswrapper[4926]: I1122 11:19:47.068156 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/61e72bc5-b152-4df1-95ee-bb47a81514ff-nova-migration-ssh-key-1\") pod \"61e72bc5-b152-4df1-95ee-bb47a81514ff\" (UID: \"61e72bc5-b152-4df1-95ee-bb47a81514ff\") " Nov 22 11:19:47 crc kubenswrapper[4926]: I1122 11:19:47.068220 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/61e72bc5-b152-4df1-95ee-bb47a81514ff-inventory\") pod \"61e72bc5-b152-4df1-95ee-bb47a81514ff\" (UID: \"61e72bc5-b152-4df1-95ee-bb47a81514ff\") " Nov 22 11:19:47 crc kubenswrapper[4926]: I1122 11:19:47.075296 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/61e72bc5-b152-4df1-95ee-bb47a81514ff-kube-api-access-w7pf5" (OuterVolumeSpecName: "kube-api-access-w7pf5") pod "61e72bc5-b152-4df1-95ee-bb47a81514ff" (UID: "61e72bc5-b152-4df1-95ee-bb47a81514ff"). InnerVolumeSpecName "kube-api-access-w7pf5". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 11:19:47 crc kubenswrapper[4926]: I1122 11:19:47.093049 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/61e72bc5-b152-4df1-95ee-bb47a81514ff-nova-combined-ca-bundle" (OuterVolumeSpecName: "nova-combined-ca-bundle") pod "61e72bc5-b152-4df1-95ee-bb47a81514ff" (UID: "61e72bc5-b152-4df1-95ee-bb47a81514ff"). InnerVolumeSpecName "nova-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 11:19:47 crc kubenswrapper[4926]: I1122 11:19:47.102631 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/61e72bc5-b152-4df1-95ee-bb47a81514ff-inventory" (OuterVolumeSpecName: "inventory") pod "61e72bc5-b152-4df1-95ee-bb47a81514ff" (UID: "61e72bc5-b152-4df1-95ee-bb47a81514ff"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 11:19:47 crc kubenswrapper[4926]: I1122 11:19:47.102663 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/61e72bc5-b152-4df1-95ee-bb47a81514ff-nova-cell1-compute-config-1" (OuterVolumeSpecName: "nova-cell1-compute-config-1") pod "61e72bc5-b152-4df1-95ee-bb47a81514ff" (UID: "61e72bc5-b152-4df1-95ee-bb47a81514ff"). InnerVolumeSpecName "nova-cell1-compute-config-1". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 11:19:47 crc kubenswrapper[4926]: I1122 11:19:47.108634 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/61e72bc5-b152-4df1-95ee-bb47a81514ff-nova-migration-ssh-key-1" (OuterVolumeSpecName: "nova-migration-ssh-key-1") pod "61e72bc5-b152-4df1-95ee-bb47a81514ff" (UID: "61e72bc5-b152-4df1-95ee-bb47a81514ff"). InnerVolumeSpecName "nova-migration-ssh-key-1". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 11:19:47 crc kubenswrapper[4926]: I1122 11:19:47.110559 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/61e72bc5-b152-4df1-95ee-bb47a81514ff-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "61e72bc5-b152-4df1-95ee-bb47a81514ff" (UID: "61e72bc5-b152-4df1-95ee-bb47a81514ff"). InnerVolumeSpecName "ssh-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 11:19:47 crc kubenswrapper[4926]: I1122 11:19:47.114164 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/61e72bc5-b152-4df1-95ee-bb47a81514ff-nova-extra-config-0" (OuterVolumeSpecName: "nova-extra-config-0") pod "61e72bc5-b152-4df1-95ee-bb47a81514ff" (UID: "61e72bc5-b152-4df1-95ee-bb47a81514ff"). InnerVolumeSpecName "nova-extra-config-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 11:19:47 crc kubenswrapper[4926]: I1122 11:19:47.128721 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/61e72bc5-b152-4df1-95ee-bb47a81514ff-nova-migration-ssh-key-0" (OuterVolumeSpecName: "nova-migration-ssh-key-0") pod "61e72bc5-b152-4df1-95ee-bb47a81514ff" (UID: "61e72bc5-b152-4df1-95ee-bb47a81514ff"). InnerVolumeSpecName "nova-migration-ssh-key-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 11:19:47 crc kubenswrapper[4926]: I1122 11:19:47.133847 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/61e72bc5-b152-4df1-95ee-bb47a81514ff-nova-cell1-compute-config-0" (OuterVolumeSpecName: "nova-cell1-compute-config-0") pod "61e72bc5-b152-4df1-95ee-bb47a81514ff" (UID: "61e72bc5-b152-4df1-95ee-bb47a81514ff"). InnerVolumeSpecName "nova-cell1-compute-config-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 11:19:47 crc kubenswrapper[4926]: I1122 11:19:47.170209 4926 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/61e72bc5-b152-4df1-95ee-bb47a81514ff-inventory\") on node \"crc\" DevicePath \"\"" Nov 22 11:19:47 crc kubenswrapper[4926]: I1122 11:19:47.170249 4926 reconciler_common.go:293] "Volume detached for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/61e72bc5-b152-4df1-95ee-bb47a81514ff-nova-migration-ssh-key-0\") on node \"crc\" DevicePath \"\"" Nov 22 11:19:47 crc kubenswrapper[4926]: I1122 11:19:47.170266 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w7pf5\" (UniqueName: \"kubernetes.io/projected/61e72bc5-b152-4df1-95ee-bb47a81514ff-kube-api-access-w7pf5\") on node \"crc\" DevicePath \"\"" Nov 22 11:19:47 crc kubenswrapper[4926]: I1122 11:19:47.170279 4926 reconciler_common.go:293] "Volume detached for volume \"nova-extra-config-0\" (UniqueName: \"kubernetes.io/configmap/61e72bc5-b152-4df1-95ee-bb47a81514ff-nova-extra-config-0\") on node \"crc\" DevicePath \"\"" Nov 22 11:19:47 crc kubenswrapper[4926]: I1122 11:19:47.170291 4926 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/61e72bc5-b152-4df1-95ee-bb47a81514ff-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 22 11:19:47 crc kubenswrapper[4926]: I1122 11:19:47.170302 4926 reconciler_common.go:293] "Volume detached for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/61e72bc5-b152-4df1-95ee-bb47a81514ff-nova-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 22 11:19:47 crc kubenswrapper[4926]: I1122 11:19:47.170316 4926 reconciler_common.go:293] "Volume detached for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/61e72bc5-b152-4df1-95ee-bb47a81514ff-nova-cell1-compute-config-1\") on node \"crc\" DevicePath \"\"" Nov 22 11:19:47 crc kubenswrapper[4926]: I1122 11:19:47.170328 4926 reconciler_common.go:293] "Volume detached for volume \"nova-cell1-compute-config-0\" (UniqueName: 
\"kubernetes.io/secret/61e72bc5-b152-4df1-95ee-bb47a81514ff-nova-cell1-compute-config-0\") on node \"crc\" DevicePath \"\"" Nov 22 11:19:47 crc kubenswrapper[4926]: I1122 11:19:47.170340 4926 reconciler_common.go:293] "Volume detached for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/61e72bc5-b152-4df1-95ee-bb47a81514ff-nova-migration-ssh-key-1\") on node \"crc\" DevicePath \"\"" Nov 22 11:19:47 crc kubenswrapper[4926]: I1122 11:19:47.466761 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-554th" event={"ID":"61e72bc5-b152-4df1-95ee-bb47a81514ff","Type":"ContainerDied","Data":"60f4703b359bfb483d094fdd5e80fc513f1865e03c3036a23e2e7e6f1ad3339d"} Nov 22 11:19:47 crc kubenswrapper[4926]: I1122 11:19:47.466821 4926 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="60f4703b359bfb483d094fdd5e80fc513f1865e03c3036a23e2e7e6f1ad3339d" Nov 22 11:19:47 crc kubenswrapper[4926]: I1122 11:19:47.466923 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-554th" Nov 22 11:19:47 crc kubenswrapper[4926]: I1122 11:19:47.575571 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/telemetry-edpm-deployment-openstack-edpm-ipam-sfz4n"] Nov 22 11:19:47 crc kubenswrapper[4926]: E1122 11:19:47.575955 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8f0794c1-ea86-4f14-88f4-d9b34b2d5e6e" containerName="extract-content" Nov 22 11:19:47 crc kubenswrapper[4926]: I1122 11:19:47.575970 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="8f0794c1-ea86-4f14-88f4-d9b34b2d5e6e" containerName="extract-content" Nov 22 11:19:47 crc kubenswrapper[4926]: E1122 11:19:47.575993 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8f0794c1-ea86-4f14-88f4-d9b34b2d5e6e" containerName="extract-utilities" Nov 22 11:19:47 crc kubenswrapper[4926]: I1122 11:19:47.576000 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="8f0794c1-ea86-4f14-88f4-d9b34b2d5e6e" containerName="extract-utilities" Nov 22 11:19:47 crc kubenswrapper[4926]: E1122 11:19:47.576015 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8f0794c1-ea86-4f14-88f4-d9b34b2d5e6e" containerName="registry-server" Nov 22 11:19:47 crc kubenswrapper[4926]: I1122 11:19:47.576022 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="8f0794c1-ea86-4f14-88f4-d9b34b2d5e6e" containerName="registry-server" Nov 22 11:19:47 crc kubenswrapper[4926]: E1122 11:19:47.576047 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="61e72bc5-b152-4df1-95ee-bb47a81514ff" containerName="nova-edpm-deployment-openstack-edpm-ipam" Nov 22 11:19:47 crc kubenswrapper[4926]: I1122 11:19:47.576053 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="61e72bc5-b152-4df1-95ee-bb47a81514ff" containerName="nova-edpm-deployment-openstack-edpm-ipam" Nov 22 11:19:47 crc kubenswrapper[4926]: I1122 11:19:47.576210 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="8f0794c1-ea86-4f14-88f4-d9b34b2d5e6e" containerName="registry-server" Nov 22 11:19:47 crc kubenswrapper[4926]: I1122 11:19:47.576232 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="61e72bc5-b152-4df1-95ee-bb47a81514ff" containerName="nova-edpm-deployment-openstack-edpm-ipam" Nov 22 11:19:47 crc kubenswrapper[4926]: I1122 11:19:47.576786 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-sfz4n" Nov 22 11:19:47 crc kubenswrapper[4926]: I1122 11:19:47.580252 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-compute-config-data" Nov 22 11:19:47 crc kubenswrapper[4926]: I1122 11:19:47.580496 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 22 11:19:47 crc kubenswrapper[4926]: I1122 11:19:47.580534 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 22 11:19:47 crc kubenswrapper[4926]: I1122 11:19:47.580612 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 22 11:19:47 crc kubenswrapper[4926]: I1122 11:19:47.581964 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-lscnm" Nov 22 11:19:47 crc kubenswrapper[4926]: I1122 11:19:47.595072 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/telemetry-edpm-deployment-openstack-edpm-ipam-sfz4n"] Nov 22 11:19:47 crc kubenswrapper[4926]: I1122 11:19:47.680269 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-compute-config-data-2\" (UniqueName: \"kubernetes.io/secret/b1c88ed0-29ad-4a8c-8708-0b2a1d5853a4-ceilometer-compute-config-data-2\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-sfz4n\" (UID: \"b1c88ed0-29ad-4a8c-8708-0b2a1d5853a4\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-sfz4n" Nov 22 11:19:47 crc kubenswrapper[4926]: I1122 11:19:47.680391 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/b1c88ed0-29ad-4a8c-8708-0b2a1d5853a4-ssh-key\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-sfz4n\" (UID: \"b1c88ed0-29ad-4a8c-8708-0b2a1d5853a4\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-sfz4n" Nov 22 11:19:47 crc kubenswrapper[4926]: I1122 11:19:47.680458 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/b1c88ed0-29ad-4a8c-8708-0b2a1d5853a4-inventory\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-sfz4n\" (UID: \"b1c88ed0-29ad-4a8c-8708-0b2a1d5853a4\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-sfz4n" Nov 22 11:19:47 crc kubenswrapper[4926]: I1122 11:19:47.680549 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b1c88ed0-29ad-4a8c-8708-0b2a1d5853a4-telemetry-combined-ca-bundle\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-sfz4n\" (UID: \"b1c88ed0-29ad-4a8c-8708-0b2a1d5853a4\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-sfz4n" Nov 22 11:19:47 crc kubenswrapper[4926]: I1122 11:19:47.680592 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/b1c88ed0-29ad-4a8c-8708-0b2a1d5853a4-ceilometer-compute-config-data-0\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-sfz4n\" (UID: \"b1c88ed0-29ad-4a8c-8708-0b2a1d5853a4\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-sfz4n" Nov 22 11:19:47 crc kubenswrapper[4926]: I1122 
11:19:47.680683 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-h9r2h\" (UniqueName: \"kubernetes.io/projected/b1c88ed0-29ad-4a8c-8708-0b2a1d5853a4-kube-api-access-h9r2h\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-sfz4n\" (UID: \"b1c88ed0-29ad-4a8c-8708-0b2a1d5853a4\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-sfz4n" Nov 22 11:19:47 crc kubenswrapper[4926]: I1122 11:19:47.680808 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/b1c88ed0-29ad-4a8c-8708-0b2a1d5853a4-ceilometer-compute-config-data-1\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-sfz4n\" (UID: \"b1c88ed0-29ad-4a8c-8708-0b2a1d5853a4\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-sfz4n" Nov 22 11:19:47 crc kubenswrapper[4926]: I1122 11:19:47.782577 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-compute-config-data-2\" (UniqueName: \"kubernetes.io/secret/b1c88ed0-29ad-4a8c-8708-0b2a1d5853a4-ceilometer-compute-config-data-2\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-sfz4n\" (UID: \"b1c88ed0-29ad-4a8c-8708-0b2a1d5853a4\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-sfz4n" Nov 22 11:19:47 crc kubenswrapper[4926]: I1122 11:19:47.782635 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/b1c88ed0-29ad-4a8c-8708-0b2a1d5853a4-ssh-key\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-sfz4n\" (UID: \"b1c88ed0-29ad-4a8c-8708-0b2a1d5853a4\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-sfz4n" Nov 22 11:19:47 crc kubenswrapper[4926]: I1122 11:19:47.782672 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/b1c88ed0-29ad-4a8c-8708-0b2a1d5853a4-inventory\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-sfz4n\" (UID: \"b1c88ed0-29ad-4a8c-8708-0b2a1d5853a4\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-sfz4n" Nov 22 11:19:47 crc kubenswrapper[4926]: I1122 11:19:47.782716 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b1c88ed0-29ad-4a8c-8708-0b2a1d5853a4-telemetry-combined-ca-bundle\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-sfz4n\" (UID: \"b1c88ed0-29ad-4a8c-8708-0b2a1d5853a4\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-sfz4n" Nov 22 11:19:47 crc kubenswrapper[4926]: I1122 11:19:47.782739 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/b1c88ed0-29ad-4a8c-8708-0b2a1d5853a4-ceilometer-compute-config-data-0\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-sfz4n\" (UID: \"b1c88ed0-29ad-4a8c-8708-0b2a1d5853a4\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-sfz4n" Nov 22 11:19:47 crc kubenswrapper[4926]: I1122 11:19:47.782772 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-h9r2h\" (UniqueName: \"kubernetes.io/projected/b1c88ed0-29ad-4a8c-8708-0b2a1d5853a4-kube-api-access-h9r2h\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-sfz4n\" (UID: \"b1c88ed0-29ad-4a8c-8708-0b2a1d5853a4\") " 
pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-sfz4n" Nov 22 11:19:47 crc kubenswrapper[4926]: I1122 11:19:47.782826 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/b1c88ed0-29ad-4a8c-8708-0b2a1d5853a4-ceilometer-compute-config-data-1\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-sfz4n\" (UID: \"b1c88ed0-29ad-4a8c-8708-0b2a1d5853a4\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-sfz4n" Nov 22 11:19:47 crc kubenswrapper[4926]: I1122 11:19:47.788367 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/b1c88ed0-29ad-4a8c-8708-0b2a1d5853a4-ceilometer-compute-config-data-0\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-sfz4n\" (UID: \"b1c88ed0-29ad-4a8c-8708-0b2a1d5853a4\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-sfz4n" Nov 22 11:19:47 crc kubenswrapper[4926]: I1122 11:19:47.788479 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-compute-config-data-2\" (UniqueName: \"kubernetes.io/secret/b1c88ed0-29ad-4a8c-8708-0b2a1d5853a4-ceilometer-compute-config-data-2\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-sfz4n\" (UID: \"b1c88ed0-29ad-4a8c-8708-0b2a1d5853a4\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-sfz4n" Nov 22 11:19:47 crc kubenswrapper[4926]: I1122 11:19:47.789310 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b1c88ed0-29ad-4a8c-8708-0b2a1d5853a4-telemetry-combined-ca-bundle\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-sfz4n\" (UID: \"b1c88ed0-29ad-4a8c-8708-0b2a1d5853a4\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-sfz4n" Nov 22 11:19:47 crc kubenswrapper[4926]: I1122 11:19:47.790878 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/b1c88ed0-29ad-4a8c-8708-0b2a1d5853a4-inventory\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-sfz4n\" (UID: \"b1c88ed0-29ad-4a8c-8708-0b2a1d5853a4\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-sfz4n" Nov 22 11:19:47 crc kubenswrapper[4926]: I1122 11:19:47.791508 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/b1c88ed0-29ad-4a8c-8708-0b2a1d5853a4-ceilometer-compute-config-data-1\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-sfz4n\" (UID: \"b1c88ed0-29ad-4a8c-8708-0b2a1d5853a4\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-sfz4n" Nov 22 11:19:47 crc kubenswrapper[4926]: I1122 11:19:47.796365 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/b1c88ed0-29ad-4a8c-8708-0b2a1d5853a4-ssh-key\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-sfz4n\" (UID: \"b1c88ed0-29ad-4a8c-8708-0b2a1d5853a4\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-sfz4n" Nov 22 11:19:47 crc kubenswrapper[4926]: I1122 11:19:47.804209 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-h9r2h\" (UniqueName: \"kubernetes.io/projected/b1c88ed0-29ad-4a8c-8708-0b2a1d5853a4-kube-api-access-h9r2h\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-sfz4n\" (UID: 
\"b1c88ed0-29ad-4a8c-8708-0b2a1d5853a4\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-sfz4n" Nov 22 11:19:47 crc kubenswrapper[4926]: I1122 11:19:47.894527 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-sfz4n" Nov 22 11:19:48 crc kubenswrapper[4926]: I1122 11:19:48.500332 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/telemetry-edpm-deployment-openstack-edpm-ipam-sfz4n"] Nov 22 11:19:49 crc kubenswrapper[4926]: I1122 11:19:49.487005 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-sfz4n" event={"ID":"b1c88ed0-29ad-4a8c-8708-0b2a1d5853a4","Type":"ContainerStarted","Data":"c21577db246602fc4cf52c411aa8f0e49d74be2b0c79aae65b224b6405eec3d6"} Nov 22 11:19:49 crc kubenswrapper[4926]: I1122 11:19:49.487421 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-sfz4n" event={"ID":"b1c88ed0-29ad-4a8c-8708-0b2a1d5853a4","Type":"ContainerStarted","Data":"e3290bffcacec4a3ea21b96b10ec6d7df293fe6bafd03dd51ecbb5c22dade342"} Nov 22 11:19:52 crc kubenswrapper[4926]: I1122 11:19:52.581982 4926 scope.go:117] "RemoveContainer" containerID="2e3f235c8fb6d7bb8e4f924fdc1e4071eb0ee0c8a090058bfe231e5c577e4544" Nov 22 11:19:52 crc kubenswrapper[4926]: E1122 11:19:52.582715 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xr9nd_openshift-machine-config-operator(d4977b14-85c3-4141-9b15-1768f09e8d27)\"" pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" podUID="d4977b14-85c3-4141-9b15-1768f09e8d27" Nov 22 11:20:03 crc kubenswrapper[4926]: I1122 11:20:03.582915 4926 scope.go:117] "RemoveContainer" containerID="2e3f235c8fb6d7bb8e4f924fdc1e4071eb0ee0c8a090058bfe231e5c577e4544" Nov 22 11:20:03 crc kubenswrapper[4926]: E1122 11:20:03.585591 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xr9nd_openshift-machine-config-operator(d4977b14-85c3-4141-9b15-1768f09e8d27)\"" pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" podUID="d4977b14-85c3-4141-9b15-1768f09e8d27" Nov 22 11:20:15 crc kubenswrapper[4926]: I1122 11:20:15.582239 4926 scope.go:117] "RemoveContainer" containerID="2e3f235c8fb6d7bb8e4f924fdc1e4071eb0ee0c8a090058bfe231e5c577e4544" Nov 22 11:20:15 crc kubenswrapper[4926]: E1122 11:20:15.583078 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xr9nd_openshift-machine-config-operator(d4977b14-85c3-4141-9b15-1768f09e8d27)\"" pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" podUID="d4977b14-85c3-4141-9b15-1768f09e8d27" Nov 22 11:20:27 crc kubenswrapper[4926]: I1122 11:20:27.583403 4926 scope.go:117] "RemoveContainer" containerID="2e3f235c8fb6d7bb8e4f924fdc1e4071eb0ee0c8a090058bfe231e5c577e4544" Nov 22 11:20:27 crc kubenswrapper[4926]: E1122 11:20:27.584368 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for 
\"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xr9nd_openshift-machine-config-operator(d4977b14-85c3-4141-9b15-1768f09e8d27)\"" pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" podUID="d4977b14-85c3-4141-9b15-1768f09e8d27" Nov 22 11:20:42 crc kubenswrapper[4926]: I1122 11:20:42.582412 4926 scope.go:117] "RemoveContainer" containerID="2e3f235c8fb6d7bb8e4f924fdc1e4071eb0ee0c8a090058bfe231e5c577e4544" Nov 22 11:20:42 crc kubenswrapper[4926]: E1122 11:20:42.583148 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xr9nd_openshift-machine-config-operator(d4977b14-85c3-4141-9b15-1768f09e8d27)\"" pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" podUID="d4977b14-85c3-4141-9b15-1768f09e8d27" Nov 22 11:20:57 crc kubenswrapper[4926]: I1122 11:20:57.582479 4926 scope.go:117] "RemoveContainer" containerID="2e3f235c8fb6d7bb8e4f924fdc1e4071eb0ee0c8a090058bfe231e5c577e4544" Nov 22 11:20:57 crc kubenswrapper[4926]: E1122 11:20:57.583167 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xr9nd_openshift-machine-config-operator(d4977b14-85c3-4141-9b15-1768f09e8d27)\"" pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" podUID="d4977b14-85c3-4141-9b15-1768f09e8d27" Nov 22 11:21:11 crc kubenswrapper[4926]: I1122 11:21:11.582463 4926 scope.go:117] "RemoveContainer" containerID="2e3f235c8fb6d7bb8e4f924fdc1e4071eb0ee0c8a090058bfe231e5c577e4544" Nov 22 11:21:11 crc kubenswrapper[4926]: E1122 11:21:11.583417 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xr9nd_openshift-machine-config-operator(d4977b14-85c3-4141-9b15-1768f09e8d27)\"" pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" podUID="d4977b14-85c3-4141-9b15-1768f09e8d27" Nov 22 11:21:24 crc kubenswrapper[4926]: I1122 11:21:24.582787 4926 scope.go:117] "RemoveContainer" containerID="2e3f235c8fb6d7bb8e4f924fdc1e4071eb0ee0c8a090058bfe231e5c577e4544" Nov 22 11:21:24 crc kubenswrapper[4926]: E1122 11:21:24.583523 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xr9nd_openshift-machine-config-operator(d4977b14-85c3-4141-9b15-1768f09e8d27)\"" pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" podUID="d4977b14-85c3-4141-9b15-1768f09e8d27" Nov 22 11:21:36 crc kubenswrapper[4926]: I1122 11:21:36.582788 4926 scope.go:117] "RemoveContainer" containerID="2e3f235c8fb6d7bb8e4f924fdc1e4071eb0ee0c8a090058bfe231e5c577e4544" Nov 22 11:21:36 crc kubenswrapper[4926]: E1122 11:21:36.583673 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-xr9nd_openshift-machine-config-operator(d4977b14-85c3-4141-9b15-1768f09e8d27)\"" pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" podUID="d4977b14-85c3-4141-9b15-1768f09e8d27" Nov 22 11:21:50 crc kubenswrapper[4926]: I1122 11:21:50.597778 4926 scope.go:117] "RemoveContainer" containerID="2e3f235c8fb6d7bb8e4f924fdc1e4071eb0ee0c8a090058bfe231e5c577e4544" Nov 22 11:21:50 crc kubenswrapper[4926]: E1122 11:21:50.598748 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xr9nd_openshift-machine-config-operator(d4977b14-85c3-4141-9b15-1768f09e8d27)\"" pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" podUID="d4977b14-85c3-4141-9b15-1768f09e8d27" Nov 22 11:22:01 crc kubenswrapper[4926]: I1122 11:22:01.582565 4926 scope.go:117] "RemoveContainer" containerID="2e3f235c8fb6d7bb8e4f924fdc1e4071eb0ee0c8a090058bfe231e5c577e4544" Nov 22 11:22:01 crc kubenswrapper[4926]: E1122 11:22:01.583829 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xr9nd_openshift-machine-config-operator(d4977b14-85c3-4141-9b15-1768f09e8d27)\"" pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" podUID="d4977b14-85c3-4141-9b15-1768f09e8d27" Nov 22 11:22:11 crc kubenswrapper[4926]: I1122 11:22:11.861752 4926 generic.go:334] "Generic (PLEG): container finished" podID="b1c88ed0-29ad-4a8c-8708-0b2a1d5853a4" containerID="c21577db246602fc4cf52c411aa8f0e49d74be2b0c79aae65b224b6405eec3d6" exitCode=0 Nov 22 11:22:11 crc kubenswrapper[4926]: I1122 11:22:11.861840 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-sfz4n" event={"ID":"b1c88ed0-29ad-4a8c-8708-0b2a1d5853a4","Type":"ContainerDied","Data":"c21577db246602fc4cf52c411aa8f0e49d74be2b0c79aae65b224b6405eec3d6"} Nov 22 11:22:12 crc kubenswrapper[4926]: I1122 11:22:12.581824 4926 scope.go:117] "RemoveContainer" containerID="2e3f235c8fb6d7bb8e4f924fdc1e4071eb0ee0c8a090058bfe231e5c577e4544" Nov 22 11:22:12 crc kubenswrapper[4926]: E1122 11:22:12.582166 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xr9nd_openshift-machine-config-operator(d4977b14-85c3-4141-9b15-1768f09e8d27)\"" pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" podUID="d4977b14-85c3-4141-9b15-1768f09e8d27" Nov 22 11:22:13 crc kubenswrapper[4926]: I1122 11:22:13.319677 4926 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-sfz4n" Nov 22 11:22:13 crc kubenswrapper[4926]: I1122 11:22:13.440916 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/b1c88ed0-29ad-4a8c-8708-0b2a1d5853a4-ssh-key\") pod \"b1c88ed0-29ad-4a8c-8708-0b2a1d5853a4\" (UID: \"b1c88ed0-29ad-4a8c-8708-0b2a1d5853a4\") " Nov 22 11:22:13 crc kubenswrapper[4926]: I1122 11:22:13.441013 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b1c88ed0-29ad-4a8c-8708-0b2a1d5853a4-telemetry-combined-ca-bundle\") pod \"b1c88ed0-29ad-4a8c-8708-0b2a1d5853a4\" (UID: \"b1c88ed0-29ad-4a8c-8708-0b2a1d5853a4\") " Nov 22 11:22:13 crc kubenswrapper[4926]: I1122 11:22:13.441047 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-compute-config-data-2\" (UniqueName: \"kubernetes.io/secret/b1c88ed0-29ad-4a8c-8708-0b2a1d5853a4-ceilometer-compute-config-data-2\") pod \"b1c88ed0-29ad-4a8c-8708-0b2a1d5853a4\" (UID: \"b1c88ed0-29ad-4a8c-8708-0b2a1d5853a4\") " Nov 22 11:22:13 crc kubenswrapper[4926]: I1122 11:22:13.441147 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/b1c88ed0-29ad-4a8c-8708-0b2a1d5853a4-inventory\") pod \"b1c88ed0-29ad-4a8c-8708-0b2a1d5853a4\" (UID: \"b1c88ed0-29ad-4a8c-8708-0b2a1d5853a4\") " Nov 22 11:22:13 crc kubenswrapper[4926]: I1122 11:22:13.441220 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/b1c88ed0-29ad-4a8c-8708-0b2a1d5853a4-ceilometer-compute-config-data-1\") pod \"b1c88ed0-29ad-4a8c-8708-0b2a1d5853a4\" (UID: \"b1c88ed0-29ad-4a8c-8708-0b2a1d5853a4\") " Nov 22 11:22:13 crc kubenswrapper[4926]: I1122 11:22:13.441296 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-h9r2h\" (UniqueName: \"kubernetes.io/projected/b1c88ed0-29ad-4a8c-8708-0b2a1d5853a4-kube-api-access-h9r2h\") pod \"b1c88ed0-29ad-4a8c-8708-0b2a1d5853a4\" (UID: \"b1c88ed0-29ad-4a8c-8708-0b2a1d5853a4\") " Nov 22 11:22:13 crc kubenswrapper[4926]: I1122 11:22:13.441319 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/b1c88ed0-29ad-4a8c-8708-0b2a1d5853a4-ceilometer-compute-config-data-0\") pod \"b1c88ed0-29ad-4a8c-8708-0b2a1d5853a4\" (UID: \"b1c88ed0-29ad-4a8c-8708-0b2a1d5853a4\") " Nov 22 11:22:13 crc kubenswrapper[4926]: I1122 11:22:13.448139 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b1c88ed0-29ad-4a8c-8708-0b2a1d5853a4-kube-api-access-h9r2h" (OuterVolumeSpecName: "kube-api-access-h9r2h") pod "b1c88ed0-29ad-4a8c-8708-0b2a1d5853a4" (UID: "b1c88ed0-29ad-4a8c-8708-0b2a1d5853a4"). InnerVolumeSpecName "kube-api-access-h9r2h". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 11:22:13 crc kubenswrapper[4926]: I1122 11:22:13.459404 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b1c88ed0-29ad-4a8c-8708-0b2a1d5853a4-telemetry-combined-ca-bundle" (OuterVolumeSpecName: "telemetry-combined-ca-bundle") pod "b1c88ed0-29ad-4a8c-8708-0b2a1d5853a4" (UID: "b1c88ed0-29ad-4a8c-8708-0b2a1d5853a4"). 
InnerVolumeSpecName "telemetry-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 11:22:13 crc kubenswrapper[4926]: I1122 11:22:13.469565 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b1c88ed0-29ad-4a8c-8708-0b2a1d5853a4-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "b1c88ed0-29ad-4a8c-8708-0b2a1d5853a4" (UID: "b1c88ed0-29ad-4a8c-8708-0b2a1d5853a4"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 11:22:13 crc kubenswrapper[4926]: I1122 11:22:13.471511 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b1c88ed0-29ad-4a8c-8708-0b2a1d5853a4-inventory" (OuterVolumeSpecName: "inventory") pod "b1c88ed0-29ad-4a8c-8708-0b2a1d5853a4" (UID: "b1c88ed0-29ad-4a8c-8708-0b2a1d5853a4"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 11:22:13 crc kubenswrapper[4926]: I1122 11:22:13.472341 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b1c88ed0-29ad-4a8c-8708-0b2a1d5853a4-ceilometer-compute-config-data-0" (OuterVolumeSpecName: "ceilometer-compute-config-data-0") pod "b1c88ed0-29ad-4a8c-8708-0b2a1d5853a4" (UID: "b1c88ed0-29ad-4a8c-8708-0b2a1d5853a4"). InnerVolumeSpecName "ceilometer-compute-config-data-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 11:22:13 crc kubenswrapper[4926]: I1122 11:22:13.476101 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b1c88ed0-29ad-4a8c-8708-0b2a1d5853a4-ceilometer-compute-config-data-1" (OuterVolumeSpecName: "ceilometer-compute-config-data-1") pod "b1c88ed0-29ad-4a8c-8708-0b2a1d5853a4" (UID: "b1c88ed0-29ad-4a8c-8708-0b2a1d5853a4"). InnerVolumeSpecName "ceilometer-compute-config-data-1". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 11:22:13 crc kubenswrapper[4926]: I1122 11:22:13.481207 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b1c88ed0-29ad-4a8c-8708-0b2a1d5853a4-ceilometer-compute-config-data-2" (OuterVolumeSpecName: "ceilometer-compute-config-data-2") pod "b1c88ed0-29ad-4a8c-8708-0b2a1d5853a4" (UID: "b1c88ed0-29ad-4a8c-8708-0b2a1d5853a4"). InnerVolumeSpecName "ceilometer-compute-config-data-2". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 11:22:13 crc kubenswrapper[4926]: I1122 11:22:13.543837 4926 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/b1c88ed0-29ad-4a8c-8708-0b2a1d5853a4-inventory\") on node \"crc\" DevicePath \"\"" Nov 22 11:22:13 crc kubenswrapper[4926]: I1122 11:22:13.544202 4926 reconciler_common.go:293] "Volume detached for volume \"ceilometer-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/b1c88ed0-29ad-4a8c-8708-0b2a1d5853a4-ceilometer-compute-config-data-1\") on node \"crc\" DevicePath \"\"" Nov 22 11:22:13 crc kubenswrapper[4926]: I1122 11:22:13.544283 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-h9r2h\" (UniqueName: \"kubernetes.io/projected/b1c88ed0-29ad-4a8c-8708-0b2a1d5853a4-kube-api-access-h9r2h\") on node \"crc\" DevicePath \"\"" Nov 22 11:22:13 crc kubenswrapper[4926]: I1122 11:22:13.544366 4926 reconciler_common.go:293] "Volume detached for volume \"ceilometer-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/b1c88ed0-29ad-4a8c-8708-0b2a1d5853a4-ceilometer-compute-config-data-0\") on node \"crc\" DevicePath \"\"" Nov 22 11:22:13 crc kubenswrapper[4926]: I1122 11:22:13.544466 4926 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/b1c88ed0-29ad-4a8c-8708-0b2a1d5853a4-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 22 11:22:13 crc kubenswrapper[4926]: I1122 11:22:13.544559 4926 reconciler_common.go:293] "Volume detached for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b1c88ed0-29ad-4a8c-8708-0b2a1d5853a4-telemetry-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 22 11:22:13 crc kubenswrapper[4926]: I1122 11:22:13.544618 4926 reconciler_common.go:293] "Volume detached for volume \"ceilometer-compute-config-data-2\" (UniqueName: \"kubernetes.io/secret/b1c88ed0-29ad-4a8c-8708-0b2a1d5853a4-ceilometer-compute-config-data-2\") on node \"crc\" DevicePath \"\"" Nov 22 11:22:13 crc kubenswrapper[4926]: I1122 11:22:13.892830 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-sfz4n" event={"ID":"b1c88ed0-29ad-4a8c-8708-0b2a1d5853a4","Type":"ContainerDied","Data":"e3290bffcacec4a3ea21b96b10ec6d7df293fe6bafd03dd51ecbb5c22dade342"} Nov 22 11:22:13 crc kubenswrapper[4926]: I1122 11:22:13.893278 4926 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e3290bffcacec4a3ea21b96b10ec6d7df293fe6bafd03dd51ecbb5c22dade342" Nov 22 11:22:13 crc kubenswrapper[4926]: I1122 11:22:13.892902 4926 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-sfz4n" Nov 22 11:22:26 crc kubenswrapper[4926]: I1122 11:22:26.581858 4926 scope.go:117] "RemoveContainer" containerID="2e3f235c8fb6d7bb8e4f924fdc1e4071eb0ee0c8a090058bfe231e5c577e4544" Nov 22 11:22:26 crc kubenswrapper[4926]: E1122 11:22:26.582705 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xr9nd_openshift-machine-config-operator(d4977b14-85c3-4141-9b15-1768f09e8d27)\"" pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" podUID="d4977b14-85c3-4141-9b15-1768f09e8d27" Nov 22 11:22:39 crc kubenswrapper[4926]: I1122 11:22:39.582718 4926 scope.go:117] "RemoveContainer" containerID="2e3f235c8fb6d7bb8e4f924fdc1e4071eb0ee0c8a090058bfe231e5c577e4544" Nov 22 11:22:39 crc kubenswrapper[4926]: E1122 11:22:39.583791 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xr9nd_openshift-machine-config-operator(d4977b14-85c3-4141-9b15-1768f09e8d27)\"" pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" podUID="d4977b14-85c3-4141-9b15-1768f09e8d27" Nov 22 11:22:44 crc kubenswrapper[4926]: I1122 11:22:44.892956 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="metallb-system/metallb-operator-controller-manager-7d858964b4-hd89m" podUID="afb6b154-40e5-4285-9f49-38053bdbb6c4" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.46:8080/readyz\": dial tcp 10.217.0.46:8080: i/o timeout (Client.Timeout exceeded while awaiting headers)" Nov 22 11:22:44 crc kubenswrapper[4926]: I1122 11:22:44.895642 4926 prober.go:107] "Probe failed" probeType="Readiness" pod="metallb-system/metallb-operator-webhook-server-676845568d-nb86k" podUID="63e553c4-290f-4b65-a563-b57f0577c982" containerName="webhook-server" probeResult="failure" output="Get \"http://10.217.0.47:7472/metrics\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 22 11:22:45 crc kubenswrapper[4926]: E1122 11:22:45.008703 4926 kubelet.go:2526] "Housekeeping took longer than expected" err="housekeeping took too long" expected="1s" actual="2.11s" Nov 22 11:22:53 crc kubenswrapper[4926]: I1122 11:22:53.582717 4926 scope.go:117] "RemoveContainer" containerID="2e3f235c8fb6d7bb8e4f924fdc1e4071eb0ee0c8a090058bfe231e5c577e4544" Nov 22 11:22:53 crc kubenswrapper[4926]: E1122 11:22:53.584167 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xr9nd_openshift-machine-config-operator(d4977b14-85c3-4141-9b15-1768f09e8d27)\"" pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" podUID="d4977b14-85c3-4141-9b15-1768f09e8d27" Nov 22 11:23:01 crc kubenswrapper[4926]: I1122 11:23:01.413205 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/tempest-tests-tempest"] Nov 22 11:23:01 crc kubenswrapper[4926]: E1122 11:23:01.420309 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b1c88ed0-29ad-4a8c-8708-0b2a1d5853a4" containerName="telemetry-edpm-deployment-openstack-edpm-ipam" Nov 22 11:23:01 crc 
kubenswrapper[4926]: I1122 11:23:01.420388 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="b1c88ed0-29ad-4a8c-8708-0b2a1d5853a4" containerName="telemetry-edpm-deployment-openstack-edpm-ipam" Nov 22 11:23:01 crc kubenswrapper[4926]: I1122 11:23:01.421742 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="b1c88ed0-29ad-4a8c-8708-0b2a1d5853a4" containerName="telemetry-edpm-deployment-openstack-edpm-ipam" Nov 22 11:23:01 crc kubenswrapper[4926]: I1122 11:23:01.423850 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/tempest-tests-tempest" Nov 22 11:23:01 crc kubenswrapper[4926]: I1122 11:23:01.428522 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"default-dockercfg-pfcj4" Nov 22 11:23:01 crc kubenswrapper[4926]: I1122 11:23:01.429030 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"tempest-tests-tempest-custom-data-s0" Nov 22 11:23:01 crc kubenswrapper[4926]: I1122 11:23:01.429307 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"tempest-tests-tempest-env-vars-s0" Nov 22 11:23:01 crc kubenswrapper[4926]: I1122 11:23:01.429321 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"test-operator-controller-priv-key" Nov 22 11:23:01 crc kubenswrapper[4926]: I1122 11:23:01.438273 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/tempest-tests-tempest"] Nov 22 11:23:01 crc kubenswrapper[4926]: I1122 11:23:01.446740 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/588c20c1-2673-4c55-9dc4-1e20448b5adb-openstack-config\") pod \"tempest-tests-tempest\" (UID: \"588c20c1-2673-4c55-9dc4-1e20448b5adb\") " pod="openstack/tempest-tests-tempest" Nov 22 11:23:01 crc kubenswrapper[4926]: I1122 11:23:01.446951 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/588c20c1-2673-4c55-9dc4-1e20448b5adb-openstack-config-secret\") pod \"tempest-tests-tempest\" (UID: \"588c20c1-2673-4c55-9dc4-1e20448b5adb\") " pod="openstack/tempest-tests-tempest" Nov 22 11:23:01 crc kubenswrapper[4926]: I1122 11:23:01.447174 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/588c20c1-2673-4c55-9dc4-1e20448b5adb-config-data\") pod \"tempest-tests-tempest\" (UID: \"588c20c1-2673-4c55-9dc4-1e20448b5adb\") " pod="openstack/tempest-tests-tempest" Nov 22 11:23:01 crc kubenswrapper[4926]: I1122 11:23:01.549243 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6f89g\" (UniqueName: \"kubernetes.io/projected/588c20c1-2673-4c55-9dc4-1e20448b5adb-kube-api-access-6f89g\") pod \"tempest-tests-tempest\" (UID: \"588c20c1-2673-4c55-9dc4-1e20448b5adb\") " pod="openstack/tempest-tests-tempest" Nov 22 11:23:01 crc kubenswrapper[4926]: I1122 11:23:01.549310 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"test-operator-ephemeral-workdir\" (UniqueName: \"kubernetes.io/empty-dir/588c20c1-2673-4c55-9dc4-1e20448b5adb-test-operator-ephemeral-workdir\") pod \"tempest-tests-tempest\" (UID: \"588c20c1-2673-4c55-9dc4-1e20448b5adb\") " pod="openstack/tempest-tests-tempest" Nov 22 11:23:01 crc kubenswrapper[4926]: 
I1122 11:23:01.549331 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"tempest-tests-tempest\" (UID: \"588c20c1-2673-4c55-9dc4-1e20448b5adb\") " pod="openstack/tempest-tests-tempest" Nov 22 11:23:01 crc kubenswrapper[4926]: I1122 11:23:01.549350 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-certs\" (UniqueName: \"kubernetes.io/secret/588c20c1-2673-4c55-9dc4-1e20448b5adb-ca-certs\") pod \"tempest-tests-tempest\" (UID: \"588c20c1-2673-4c55-9dc4-1e20448b5adb\") " pod="openstack/tempest-tests-tempest" Nov 22 11:23:01 crc kubenswrapper[4926]: I1122 11:23:01.549382 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/588c20c1-2673-4c55-9dc4-1e20448b5adb-openstack-config\") pod \"tempest-tests-tempest\" (UID: \"588c20c1-2673-4c55-9dc4-1e20448b5adb\") " pod="openstack/tempest-tests-tempest" Nov 22 11:23:01 crc kubenswrapper[4926]: I1122 11:23:01.549422 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"test-operator-ephemeral-temporary\" (UniqueName: \"kubernetes.io/empty-dir/588c20c1-2673-4c55-9dc4-1e20448b5adb-test-operator-ephemeral-temporary\") pod \"tempest-tests-tempest\" (UID: \"588c20c1-2673-4c55-9dc4-1e20448b5adb\") " pod="openstack/tempest-tests-tempest" Nov 22 11:23:01 crc kubenswrapper[4926]: I1122 11:23:01.549454 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/588c20c1-2673-4c55-9dc4-1e20448b5adb-ssh-key\") pod \"tempest-tests-tempest\" (UID: \"588c20c1-2673-4c55-9dc4-1e20448b5adb\") " pod="openstack/tempest-tests-tempest" Nov 22 11:23:01 crc kubenswrapper[4926]: I1122 11:23:01.549469 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/588c20c1-2673-4c55-9dc4-1e20448b5adb-openstack-config-secret\") pod \"tempest-tests-tempest\" (UID: \"588c20c1-2673-4c55-9dc4-1e20448b5adb\") " pod="openstack/tempest-tests-tempest" Nov 22 11:23:01 crc kubenswrapper[4926]: I1122 11:23:01.549526 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/588c20c1-2673-4c55-9dc4-1e20448b5adb-config-data\") pod \"tempest-tests-tempest\" (UID: \"588c20c1-2673-4c55-9dc4-1e20448b5adb\") " pod="openstack/tempest-tests-tempest" Nov 22 11:23:01 crc kubenswrapper[4926]: I1122 11:23:01.550962 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/588c20c1-2673-4c55-9dc4-1e20448b5adb-openstack-config\") pod \"tempest-tests-tempest\" (UID: \"588c20c1-2673-4c55-9dc4-1e20448b5adb\") " pod="openstack/tempest-tests-tempest" Nov 22 11:23:01 crc kubenswrapper[4926]: I1122 11:23:01.551095 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/588c20c1-2673-4c55-9dc4-1e20448b5adb-config-data\") pod \"tempest-tests-tempest\" (UID: \"588c20c1-2673-4c55-9dc4-1e20448b5adb\") " pod="openstack/tempest-tests-tempest" Nov 22 11:23:01 crc kubenswrapper[4926]: I1122 11:23:01.562156 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config-secret\" 
(UniqueName: \"kubernetes.io/secret/588c20c1-2673-4c55-9dc4-1e20448b5adb-openstack-config-secret\") pod \"tempest-tests-tempest\" (UID: \"588c20c1-2673-4c55-9dc4-1e20448b5adb\") " pod="openstack/tempest-tests-tempest" Nov 22 11:23:01 crc kubenswrapper[4926]: I1122 11:23:01.651592 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/588c20c1-2673-4c55-9dc4-1e20448b5adb-ssh-key\") pod \"tempest-tests-tempest\" (UID: \"588c20c1-2673-4c55-9dc4-1e20448b5adb\") " pod="openstack/tempest-tests-tempest" Nov 22 11:23:01 crc kubenswrapper[4926]: I1122 11:23:01.652321 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6f89g\" (UniqueName: \"kubernetes.io/projected/588c20c1-2673-4c55-9dc4-1e20448b5adb-kube-api-access-6f89g\") pod \"tempest-tests-tempest\" (UID: \"588c20c1-2673-4c55-9dc4-1e20448b5adb\") " pod="openstack/tempest-tests-tempest" Nov 22 11:23:01 crc kubenswrapper[4926]: I1122 11:23:01.652938 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"test-operator-ephemeral-workdir\" (UniqueName: \"kubernetes.io/empty-dir/588c20c1-2673-4c55-9dc4-1e20448b5adb-test-operator-ephemeral-workdir\") pod \"tempest-tests-tempest\" (UID: \"588c20c1-2673-4c55-9dc4-1e20448b5adb\") " pod="openstack/tempest-tests-tempest" Nov 22 11:23:01 crc kubenswrapper[4926]: I1122 11:23:01.653005 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"tempest-tests-tempest\" (UID: \"588c20c1-2673-4c55-9dc4-1e20448b5adb\") " pod="openstack/tempest-tests-tempest" Nov 22 11:23:01 crc kubenswrapper[4926]: I1122 11:23:01.653051 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ca-certs\" (UniqueName: \"kubernetes.io/secret/588c20c1-2673-4c55-9dc4-1e20448b5adb-ca-certs\") pod \"tempest-tests-tempest\" (UID: \"588c20c1-2673-4c55-9dc4-1e20448b5adb\") " pod="openstack/tempest-tests-tempest" Nov 22 11:23:01 crc kubenswrapper[4926]: I1122 11:23:01.653211 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"test-operator-ephemeral-temporary\" (UniqueName: \"kubernetes.io/empty-dir/588c20c1-2673-4c55-9dc4-1e20448b5adb-test-operator-ephemeral-temporary\") pod \"tempest-tests-tempest\" (UID: \"588c20c1-2673-4c55-9dc4-1e20448b5adb\") " pod="openstack/tempest-tests-tempest" Nov 22 11:23:01 crc kubenswrapper[4926]: I1122 11:23:01.654253 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"test-operator-ephemeral-temporary\" (UniqueName: \"kubernetes.io/empty-dir/588c20c1-2673-4c55-9dc4-1e20448b5adb-test-operator-ephemeral-temporary\") pod \"tempest-tests-tempest\" (UID: \"588c20c1-2673-4c55-9dc4-1e20448b5adb\") " pod="openstack/tempest-tests-tempest" Nov 22 11:23:01 crc kubenswrapper[4926]: I1122 11:23:01.654850 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"test-operator-ephemeral-workdir\" (UniqueName: \"kubernetes.io/empty-dir/588c20c1-2673-4c55-9dc4-1e20448b5adb-test-operator-ephemeral-workdir\") pod \"tempest-tests-tempest\" (UID: \"588c20c1-2673-4c55-9dc4-1e20448b5adb\") " pod="openstack/tempest-tests-tempest" Nov 22 11:23:01 crc kubenswrapper[4926]: I1122 11:23:01.655337 4926 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod 
\"tempest-tests-tempest\" (UID: \"588c20c1-2673-4c55-9dc4-1e20448b5adb\") device mount path \"/mnt/openstack/pv06\"" pod="openstack/tempest-tests-tempest" Nov 22 11:23:01 crc kubenswrapper[4926]: I1122 11:23:01.664754 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/588c20c1-2673-4c55-9dc4-1e20448b5adb-ssh-key\") pod \"tempest-tests-tempest\" (UID: \"588c20c1-2673-4c55-9dc4-1e20448b5adb\") " pod="openstack/tempest-tests-tempest" Nov 22 11:23:01 crc kubenswrapper[4926]: I1122 11:23:01.664855 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ca-certs\" (UniqueName: \"kubernetes.io/secret/588c20c1-2673-4c55-9dc4-1e20448b5adb-ca-certs\") pod \"tempest-tests-tempest\" (UID: \"588c20c1-2673-4c55-9dc4-1e20448b5adb\") " pod="openstack/tempest-tests-tempest" Nov 22 11:23:01 crc kubenswrapper[4926]: I1122 11:23:01.674328 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6f89g\" (UniqueName: \"kubernetes.io/projected/588c20c1-2673-4c55-9dc4-1e20448b5adb-kube-api-access-6f89g\") pod \"tempest-tests-tempest\" (UID: \"588c20c1-2673-4c55-9dc4-1e20448b5adb\") " pod="openstack/tempest-tests-tempest" Nov 22 11:23:01 crc kubenswrapper[4926]: I1122 11:23:01.696939 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"tempest-tests-tempest\" (UID: \"588c20c1-2673-4c55-9dc4-1e20448b5adb\") " pod="openstack/tempest-tests-tempest" Nov 22 11:23:01 crc kubenswrapper[4926]: I1122 11:23:01.753083 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/tempest-tests-tempest" Nov 22 11:23:02 crc kubenswrapper[4926]: W1122 11:23:02.214580 4926 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod588c20c1_2673_4c55_9dc4_1e20448b5adb.slice/crio-0a54df007a942324957a8d3fa74d0f721814635335a55a9a7671410fb2957eb6 WatchSource:0}: Error finding container 0a54df007a942324957a8d3fa74d0f721814635335a55a9a7671410fb2957eb6: Status 404 returned error can't find the container with id 0a54df007a942324957a8d3fa74d0f721814635335a55a9a7671410fb2957eb6 Nov 22 11:23:02 crc kubenswrapper[4926]: I1122 11:23:02.218269 4926 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 22 11:23:02 crc kubenswrapper[4926]: I1122 11:23:02.218952 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/tempest-tests-tempest"] Nov 22 11:23:03 crc kubenswrapper[4926]: I1122 11:23:03.178375 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/tempest-tests-tempest" event={"ID":"588c20c1-2673-4c55-9dc4-1e20448b5adb","Type":"ContainerStarted","Data":"0a54df007a942324957a8d3fa74d0f721814635335a55a9a7671410fb2957eb6"} Nov 22 11:23:04 crc kubenswrapper[4926]: I1122 11:23:04.581854 4926 scope.go:117] "RemoveContainer" containerID="2e3f235c8fb6d7bb8e4f924fdc1e4071eb0ee0c8a090058bfe231e5c577e4544" Nov 22 11:23:04 crc kubenswrapper[4926]: E1122 11:23:04.582469 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xr9nd_openshift-machine-config-operator(d4977b14-85c3-4141-9b15-1768f09e8d27)\"" pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" 
podUID="d4977b14-85c3-4141-9b15-1768f09e8d27" Nov 22 11:23:17 crc kubenswrapper[4926]: I1122 11:23:17.582746 4926 scope.go:117] "RemoveContainer" containerID="2e3f235c8fb6d7bb8e4f924fdc1e4071eb0ee0c8a090058bfe231e5c577e4544" Nov 22 11:23:26 crc kubenswrapper[4926]: E1122 11:23:26.182569 4926 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-tempest-all:current-podified" Nov 22 11:23:26 crc kubenswrapper[4926]: E1122 11:23:26.183211 4926 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:tempest-tests-tempest-tests-runner,Image:quay.io/podified-antelope-centos9/openstack-tempest-all:current-podified,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:test-operator-ephemeral-workdir,ReadOnly:false,MountPath:/var/lib/tempest,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:test-operator-ephemeral-temporary,ReadOnly:false,MountPath:/tmp,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:false,MountPath:/etc/test_operator,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:test-operator-logs,ReadOnly:false,MountPath:/var/lib/tempest/external_files,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:openstack-config,ReadOnly:true,MountPath:/etc/openstack/clouds.yaml,SubPath:clouds.yaml,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:openstack-config,ReadOnly:true,MountPath:/var/lib/tempest/.config/openstack/clouds.yaml,SubPath:clouds.yaml,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:openstack-config-secret,ReadOnly:false,MountPath:/etc/openstack/secure.yaml,SubPath:secure.yaml,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:ca-certs,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:ssh-key,ReadOnly:false,MountPath:/var/lib/tempest/id_ecdsa,SubPath:ssh_key,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-6f89g,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42480,RunAsNonRoot:*false,ReadOnlyRootFilesystem:*false,AllowPrivilegeEscalation:*true,RunAsGroup:*42480,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{EnvFromSource{Prefix:,ConfigMapRef:&ConfigMapEnvSource{LocalObjectReference:LocalObjectReference{Name:tempest-tests-tempest-custom-data-s0,},Optional:nil,},SecretRef:nil,},EnvFromSource{Prefix:,ConfigMapRef:&ConfigMapEnvSource{LocalObjectReference:LocalObjectReference{Name:tempest-tests-tempest-env-vars-s0,},Optional:nil,},SecretRef:nil,},},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,
ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod tempest-tests-tempest_openstack(588c20c1-2673-4c55-9dc4-1e20448b5adb): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 22 11:23:26 crc kubenswrapper[4926]: E1122 11:23:26.184607 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"tempest-tests-tempest-tests-runner\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/tempest-tests-tempest" podUID="588c20c1-2673-4c55-9dc4-1e20448b5adb" Nov 22 11:23:26 crc kubenswrapper[4926]: I1122 11:23:26.403403 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" event={"ID":"d4977b14-85c3-4141-9b15-1768f09e8d27","Type":"ContainerStarted","Data":"37ef0851b72d879c7ef8e82174c0ce3461ce067e9c13a134ab7ff49f8a56bf19"} Nov 22 11:23:26 crc kubenswrapper[4926]: E1122 11:23:26.406910 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"tempest-tests-tempest-tests-runner\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-tempest-all:current-podified\\\"\"" pod="openstack/tempest-tests-tempest" podUID="588c20c1-2673-4c55-9dc4-1e20448b5adb" Nov 22 11:23:43 crc kubenswrapper[4926]: I1122 11:23:43.605727 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/tempest-tests-tempest" event={"ID":"588c20c1-2673-4c55-9dc4-1e20448b5adb","Type":"ContainerStarted","Data":"f128bd51f06034ab7c02bb879e8056f67176448885cac2b8bc9be76ce791d8b8"} Nov 22 11:23:43 crc kubenswrapper[4926]: I1122 11:23:43.627991 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/tempest-tests-tempest" podStartSLOduration=4.543911599 podStartE2EDuration="43.627969531s" podCreationTimestamp="2025-11-22 11:23:00 +0000 UTC" firstStartedPulling="2025-11-22 11:23:02.217657589 +0000 UTC m=+2602.519262946" lastFinishedPulling="2025-11-22 11:23:41.301715571 +0000 UTC m=+2641.603320878" observedRunningTime="2025-11-22 11:23:43.621619489 +0000 UTC m=+2643.923224776" watchObservedRunningTime="2025-11-22 11:23:43.627969531 +0000 UTC m=+2643.929574818" Nov 22 11:24:52 crc kubenswrapper[4926]: I1122 11:24:52.737115 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-zrwtv"] Nov 22 11:24:52 crc kubenswrapper[4926]: I1122 11:24:52.741629 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-zrwtv" Nov 22 11:24:52 crc kubenswrapper[4926]: I1122 11:24:52.764380 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-zrwtv"] Nov 22 11:24:52 crc kubenswrapper[4926]: I1122 11:24:52.836418 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/15efe56d-d0fd-4233-af3b-e22300698f53-catalog-content\") pod \"certified-operators-zrwtv\" (UID: \"15efe56d-d0fd-4233-af3b-e22300698f53\") " pod="openshift-marketplace/certified-operators-zrwtv" Nov 22 11:24:52 crc kubenswrapper[4926]: I1122 11:24:52.836617 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/15efe56d-d0fd-4233-af3b-e22300698f53-utilities\") pod \"certified-operators-zrwtv\" (UID: \"15efe56d-d0fd-4233-af3b-e22300698f53\") " pod="openshift-marketplace/certified-operators-zrwtv" Nov 22 11:24:52 crc kubenswrapper[4926]: I1122 11:24:52.836731 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fhkw9\" (UniqueName: \"kubernetes.io/projected/15efe56d-d0fd-4233-af3b-e22300698f53-kube-api-access-fhkw9\") pod \"certified-operators-zrwtv\" (UID: \"15efe56d-d0fd-4233-af3b-e22300698f53\") " pod="openshift-marketplace/certified-operators-zrwtv" Nov 22 11:24:52 crc kubenswrapper[4926]: I1122 11:24:52.938110 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/15efe56d-d0fd-4233-af3b-e22300698f53-catalog-content\") pod \"certified-operators-zrwtv\" (UID: \"15efe56d-d0fd-4233-af3b-e22300698f53\") " pod="openshift-marketplace/certified-operators-zrwtv" Nov 22 11:24:52 crc kubenswrapper[4926]: I1122 11:24:52.938253 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/15efe56d-d0fd-4233-af3b-e22300698f53-utilities\") pod \"certified-operators-zrwtv\" (UID: \"15efe56d-d0fd-4233-af3b-e22300698f53\") " pod="openshift-marketplace/certified-operators-zrwtv" Nov 22 11:24:52 crc kubenswrapper[4926]: I1122 11:24:52.938286 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fhkw9\" (UniqueName: \"kubernetes.io/projected/15efe56d-d0fd-4233-af3b-e22300698f53-kube-api-access-fhkw9\") pod \"certified-operators-zrwtv\" (UID: \"15efe56d-d0fd-4233-af3b-e22300698f53\") " pod="openshift-marketplace/certified-operators-zrwtv" Nov 22 11:24:52 crc kubenswrapper[4926]: I1122 11:24:52.938877 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/15efe56d-d0fd-4233-af3b-e22300698f53-catalog-content\") pod \"certified-operators-zrwtv\" (UID: \"15efe56d-d0fd-4233-af3b-e22300698f53\") " pod="openshift-marketplace/certified-operators-zrwtv" Nov 22 11:24:52 crc kubenswrapper[4926]: I1122 11:24:52.938964 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/15efe56d-d0fd-4233-af3b-e22300698f53-utilities\") pod \"certified-operators-zrwtv\" (UID: \"15efe56d-d0fd-4233-af3b-e22300698f53\") " pod="openshift-marketplace/certified-operators-zrwtv" Nov 22 11:24:52 crc kubenswrapper[4926]: I1122 11:24:52.960862 4926 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-fhkw9\" (UniqueName: \"kubernetes.io/projected/15efe56d-d0fd-4233-af3b-e22300698f53-kube-api-access-fhkw9\") pod \"certified-operators-zrwtv\" (UID: \"15efe56d-d0fd-4233-af3b-e22300698f53\") " pod="openshift-marketplace/certified-operators-zrwtv" Nov 22 11:24:53 crc kubenswrapper[4926]: I1122 11:24:53.084930 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-zrwtv" Nov 22 11:24:53 crc kubenswrapper[4926]: I1122 11:24:53.584820 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-zrwtv"] Nov 22 11:24:54 crc kubenswrapper[4926]: I1122 11:24:54.324440 4926 generic.go:334] "Generic (PLEG): container finished" podID="15efe56d-d0fd-4233-af3b-e22300698f53" containerID="053376315d0aee4bf4cb4e543cbb042e29a5218b2bb18e3605e19cb512cf02cd" exitCode=0 Nov 22 11:24:54 crc kubenswrapper[4926]: I1122 11:24:54.324483 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-zrwtv" event={"ID":"15efe56d-d0fd-4233-af3b-e22300698f53","Type":"ContainerDied","Data":"053376315d0aee4bf4cb4e543cbb042e29a5218b2bb18e3605e19cb512cf02cd"} Nov 22 11:24:54 crc kubenswrapper[4926]: I1122 11:24:54.324507 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-zrwtv" event={"ID":"15efe56d-d0fd-4233-af3b-e22300698f53","Type":"ContainerStarted","Data":"3eec5033ff5884829f3467527988db991d15b98c8fdf3cfe8babf8ca83e125a3"} Nov 22 11:24:55 crc kubenswrapper[4926]: I1122 11:24:55.334787 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-zrwtv" event={"ID":"15efe56d-d0fd-4233-af3b-e22300698f53","Type":"ContainerStarted","Data":"0b3dc04b9312d7e11995fa2b214722aa475fafc5bbeba0a03d5e7ecf689dcf4e"} Nov 22 11:24:56 crc kubenswrapper[4926]: I1122 11:24:56.343480 4926 generic.go:334] "Generic (PLEG): container finished" podID="15efe56d-d0fd-4233-af3b-e22300698f53" containerID="0b3dc04b9312d7e11995fa2b214722aa475fafc5bbeba0a03d5e7ecf689dcf4e" exitCode=0 Nov 22 11:24:56 crc kubenswrapper[4926]: I1122 11:24:56.343532 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-zrwtv" event={"ID":"15efe56d-d0fd-4233-af3b-e22300698f53","Type":"ContainerDied","Data":"0b3dc04b9312d7e11995fa2b214722aa475fafc5bbeba0a03d5e7ecf689dcf4e"} Nov 22 11:24:57 crc kubenswrapper[4926]: I1122 11:24:57.355168 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-zrwtv" event={"ID":"15efe56d-d0fd-4233-af3b-e22300698f53","Type":"ContainerStarted","Data":"090e97021eea82edffd4f924bbf5656e7d13a71008481fed928a16f4f4d35661"} Nov 22 11:24:57 crc kubenswrapper[4926]: I1122 11:24:57.378671 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-zrwtv" podStartSLOduration=2.7558586529999998 podStartE2EDuration="5.378654389s" podCreationTimestamp="2025-11-22 11:24:52 +0000 UTC" firstStartedPulling="2025-11-22 11:24:54.326380385 +0000 UTC m=+2714.627985672" lastFinishedPulling="2025-11-22 11:24:56.949176121 +0000 UTC m=+2717.250781408" observedRunningTime="2025-11-22 11:24:57.374742267 +0000 UTC m=+2717.676347564" watchObservedRunningTime="2025-11-22 11:24:57.378654389 +0000 UTC m=+2717.680259676" Nov 22 11:25:03 crc kubenswrapper[4926]: I1122 11:25:03.086018 4926 kubelet.go:2542] "SyncLoop (probe)" 
probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-zrwtv" Nov 22 11:25:03 crc kubenswrapper[4926]: I1122 11:25:03.086617 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-zrwtv" Nov 22 11:25:03 crc kubenswrapper[4926]: I1122 11:25:03.160606 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-zrwtv" Nov 22 11:25:03 crc kubenswrapper[4926]: I1122 11:25:03.510946 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-zrwtv" Nov 22 11:25:03 crc kubenswrapper[4926]: I1122 11:25:03.586080 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-zrwtv"] Nov 22 11:25:05 crc kubenswrapper[4926]: I1122 11:25:05.437117 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-zrwtv" podUID="15efe56d-d0fd-4233-af3b-e22300698f53" containerName="registry-server" containerID="cri-o://090e97021eea82edffd4f924bbf5656e7d13a71008481fed928a16f4f4d35661" gracePeriod=2 Nov 22 11:25:05 crc kubenswrapper[4926]: I1122 11:25:05.958138 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-zrwtv" Nov 22 11:25:06 crc kubenswrapper[4926]: I1122 11:25:06.097395 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/15efe56d-d0fd-4233-af3b-e22300698f53-utilities\") pod \"15efe56d-d0fd-4233-af3b-e22300698f53\" (UID: \"15efe56d-d0fd-4233-af3b-e22300698f53\") " Nov 22 11:25:06 crc kubenswrapper[4926]: I1122 11:25:06.097659 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fhkw9\" (UniqueName: \"kubernetes.io/projected/15efe56d-d0fd-4233-af3b-e22300698f53-kube-api-access-fhkw9\") pod \"15efe56d-d0fd-4233-af3b-e22300698f53\" (UID: \"15efe56d-d0fd-4233-af3b-e22300698f53\") " Nov 22 11:25:06 crc kubenswrapper[4926]: I1122 11:25:06.097712 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/15efe56d-d0fd-4233-af3b-e22300698f53-catalog-content\") pod \"15efe56d-d0fd-4233-af3b-e22300698f53\" (UID: \"15efe56d-d0fd-4233-af3b-e22300698f53\") " Nov 22 11:25:06 crc kubenswrapper[4926]: I1122 11:25:06.098394 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/15efe56d-d0fd-4233-af3b-e22300698f53-utilities" (OuterVolumeSpecName: "utilities") pod "15efe56d-d0fd-4233-af3b-e22300698f53" (UID: "15efe56d-d0fd-4233-af3b-e22300698f53"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 11:25:06 crc kubenswrapper[4926]: I1122 11:25:06.098931 4926 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/15efe56d-d0fd-4233-af3b-e22300698f53-utilities\") on node \"crc\" DevicePath \"\"" Nov 22 11:25:06 crc kubenswrapper[4926]: I1122 11:25:06.111075 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/15efe56d-d0fd-4233-af3b-e22300698f53-kube-api-access-fhkw9" (OuterVolumeSpecName: "kube-api-access-fhkw9") pod "15efe56d-d0fd-4233-af3b-e22300698f53" (UID: "15efe56d-d0fd-4233-af3b-e22300698f53"). 
InnerVolumeSpecName "kube-api-access-fhkw9". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 11:25:06 crc kubenswrapper[4926]: I1122 11:25:06.175123 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/15efe56d-d0fd-4233-af3b-e22300698f53-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "15efe56d-d0fd-4233-af3b-e22300698f53" (UID: "15efe56d-d0fd-4233-af3b-e22300698f53"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 11:25:06 crc kubenswrapper[4926]: I1122 11:25:06.201565 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fhkw9\" (UniqueName: \"kubernetes.io/projected/15efe56d-d0fd-4233-af3b-e22300698f53-kube-api-access-fhkw9\") on node \"crc\" DevicePath \"\"" Nov 22 11:25:06 crc kubenswrapper[4926]: I1122 11:25:06.201870 4926 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/15efe56d-d0fd-4233-af3b-e22300698f53-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 22 11:25:06 crc kubenswrapper[4926]: I1122 11:25:06.457857 4926 generic.go:334] "Generic (PLEG): container finished" podID="15efe56d-d0fd-4233-af3b-e22300698f53" containerID="090e97021eea82edffd4f924bbf5656e7d13a71008481fed928a16f4f4d35661" exitCode=0 Nov 22 11:25:06 crc kubenswrapper[4926]: I1122 11:25:06.457933 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-zrwtv" event={"ID":"15efe56d-d0fd-4233-af3b-e22300698f53","Type":"ContainerDied","Data":"090e97021eea82edffd4f924bbf5656e7d13a71008481fed928a16f4f4d35661"} Nov 22 11:25:06 crc kubenswrapper[4926]: I1122 11:25:06.460059 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-zrwtv" event={"ID":"15efe56d-d0fd-4233-af3b-e22300698f53","Type":"ContainerDied","Data":"3eec5033ff5884829f3467527988db991d15b98c8fdf3cfe8babf8ca83e125a3"} Nov 22 11:25:06 crc kubenswrapper[4926]: I1122 11:25:06.460120 4926 scope.go:117] "RemoveContainer" containerID="090e97021eea82edffd4f924bbf5656e7d13a71008481fed928a16f4f4d35661" Nov 22 11:25:06 crc kubenswrapper[4926]: I1122 11:25:06.457975 4926 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-zrwtv" Nov 22 11:25:06 crc kubenswrapper[4926]: I1122 11:25:06.491853 4926 scope.go:117] "RemoveContainer" containerID="0b3dc04b9312d7e11995fa2b214722aa475fafc5bbeba0a03d5e7ecf689dcf4e" Nov 22 11:25:06 crc kubenswrapper[4926]: I1122 11:25:06.504637 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-zrwtv"] Nov 22 11:25:06 crc kubenswrapper[4926]: I1122 11:25:06.516525 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-zrwtv"] Nov 22 11:25:06 crc kubenswrapper[4926]: I1122 11:25:06.521936 4926 scope.go:117] "RemoveContainer" containerID="053376315d0aee4bf4cb4e543cbb042e29a5218b2bb18e3605e19cb512cf02cd" Nov 22 11:25:06 crc kubenswrapper[4926]: I1122 11:25:06.578188 4926 scope.go:117] "RemoveContainer" containerID="090e97021eea82edffd4f924bbf5656e7d13a71008481fed928a16f4f4d35661" Nov 22 11:25:06 crc kubenswrapper[4926]: E1122 11:25:06.578590 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"090e97021eea82edffd4f924bbf5656e7d13a71008481fed928a16f4f4d35661\": container with ID starting with 090e97021eea82edffd4f924bbf5656e7d13a71008481fed928a16f4f4d35661 not found: ID does not exist" containerID="090e97021eea82edffd4f924bbf5656e7d13a71008481fed928a16f4f4d35661" Nov 22 11:25:06 crc kubenswrapper[4926]: I1122 11:25:06.578619 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"090e97021eea82edffd4f924bbf5656e7d13a71008481fed928a16f4f4d35661"} err="failed to get container status \"090e97021eea82edffd4f924bbf5656e7d13a71008481fed928a16f4f4d35661\": rpc error: code = NotFound desc = could not find container \"090e97021eea82edffd4f924bbf5656e7d13a71008481fed928a16f4f4d35661\": container with ID starting with 090e97021eea82edffd4f924bbf5656e7d13a71008481fed928a16f4f4d35661 not found: ID does not exist" Nov 22 11:25:06 crc kubenswrapper[4926]: I1122 11:25:06.578641 4926 scope.go:117] "RemoveContainer" containerID="0b3dc04b9312d7e11995fa2b214722aa475fafc5bbeba0a03d5e7ecf689dcf4e" Nov 22 11:25:06 crc kubenswrapper[4926]: E1122 11:25:06.579045 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0b3dc04b9312d7e11995fa2b214722aa475fafc5bbeba0a03d5e7ecf689dcf4e\": container with ID starting with 0b3dc04b9312d7e11995fa2b214722aa475fafc5bbeba0a03d5e7ecf689dcf4e not found: ID does not exist" containerID="0b3dc04b9312d7e11995fa2b214722aa475fafc5bbeba0a03d5e7ecf689dcf4e" Nov 22 11:25:06 crc kubenswrapper[4926]: I1122 11:25:06.579092 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0b3dc04b9312d7e11995fa2b214722aa475fafc5bbeba0a03d5e7ecf689dcf4e"} err="failed to get container status \"0b3dc04b9312d7e11995fa2b214722aa475fafc5bbeba0a03d5e7ecf689dcf4e\": rpc error: code = NotFound desc = could not find container \"0b3dc04b9312d7e11995fa2b214722aa475fafc5bbeba0a03d5e7ecf689dcf4e\": container with ID starting with 0b3dc04b9312d7e11995fa2b214722aa475fafc5bbeba0a03d5e7ecf689dcf4e not found: ID does not exist" Nov 22 11:25:06 crc kubenswrapper[4926]: I1122 11:25:06.579128 4926 scope.go:117] "RemoveContainer" containerID="053376315d0aee4bf4cb4e543cbb042e29a5218b2bb18e3605e19cb512cf02cd" Nov 22 11:25:06 crc kubenswrapper[4926]: E1122 11:25:06.579392 4926 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"053376315d0aee4bf4cb4e543cbb042e29a5218b2bb18e3605e19cb512cf02cd\": container with ID starting with 053376315d0aee4bf4cb4e543cbb042e29a5218b2bb18e3605e19cb512cf02cd not found: ID does not exist" containerID="053376315d0aee4bf4cb4e543cbb042e29a5218b2bb18e3605e19cb512cf02cd" Nov 22 11:25:06 crc kubenswrapper[4926]: I1122 11:25:06.579419 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"053376315d0aee4bf4cb4e543cbb042e29a5218b2bb18e3605e19cb512cf02cd"} err="failed to get container status \"053376315d0aee4bf4cb4e543cbb042e29a5218b2bb18e3605e19cb512cf02cd\": rpc error: code = NotFound desc = could not find container \"053376315d0aee4bf4cb4e543cbb042e29a5218b2bb18e3605e19cb512cf02cd\": container with ID starting with 053376315d0aee4bf4cb4e543cbb042e29a5218b2bb18e3605e19cb512cf02cd not found: ID does not exist" Nov 22 11:25:06 crc kubenswrapper[4926]: I1122 11:25:06.592113 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="15efe56d-d0fd-4233-af3b-e22300698f53" path="/var/lib/kubelet/pods/15efe56d-d0fd-4233-af3b-e22300698f53/volumes" Nov 22 11:25:39 crc kubenswrapper[4926]: I1122 11:25:39.660772 4926 patch_prober.go:28] interesting pod/machine-config-daemon-xr9nd container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 22 11:25:39 crc kubenswrapper[4926]: I1122 11:25:39.661346 4926 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" podUID="d4977b14-85c3-4141-9b15-1768f09e8d27" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 22 11:26:09 crc kubenswrapper[4926]: I1122 11:26:09.661771 4926 patch_prober.go:28] interesting pod/machine-config-daemon-xr9nd container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 22 11:26:09 crc kubenswrapper[4926]: I1122 11:26:09.662365 4926 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" podUID="d4977b14-85c3-4141-9b15-1768f09e8d27" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 22 11:26:39 crc kubenswrapper[4926]: I1122 11:26:39.660786 4926 patch_prober.go:28] interesting pod/machine-config-daemon-xr9nd container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 22 11:26:39 crc kubenswrapper[4926]: I1122 11:26:39.661440 4926 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" podUID="d4977b14-85c3-4141-9b15-1768f09e8d27" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 22 11:26:39 crc kubenswrapper[4926]: I1122 11:26:39.661507 4926 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" 
status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" Nov 22 11:26:39 crc kubenswrapper[4926]: I1122 11:26:39.662444 4926 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"37ef0851b72d879c7ef8e82174c0ce3461ce067e9c13a134ab7ff49f8a56bf19"} pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 22 11:26:39 crc kubenswrapper[4926]: I1122 11:26:39.662538 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" podUID="d4977b14-85c3-4141-9b15-1768f09e8d27" containerName="machine-config-daemon" containerID="cri-o://37ef0851b72d879c7ef8e82174c0ce3461ce067e9c13a134ab7ff49f8a56bf19" gracePeriod=600 Nov 22 11:26:40 crc kubenswrapper[4926]: I1122 11:26:40.479023 4926 generic.go:334] "Generic (PLEG): container finished" podID="d4977b14-85c3-4141-9b15-1768f09e8d27" containerID="37ef0851b72d879c7ef8e82174c0ce3461ce067e9c13a134ab7ff49f8a56bf19" exitCode=0 Nov 22 11:26:40 crc kubenswrapper[4926]: I1122 11:26:40.479360 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" event={"ID":"d4977b14-85c3-4141-9b15-1768f09e8d27","Type":"ContainerDied","Data":"37ef0851b72d879c7ef8e82174c0ce3461ce067e9c13a134ab7ff49f8a56bf19"} Nov 22 11:26:40 crc kubenswrapper[4926]: I1122 11:26:40.479391 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" event={"ID":"d4977b14-85c3-4141-9b15-1768f09e8d27","Type":"ContainerStarted","Data":"a2b8252229cd7206a620d1ff730f422361fddf9e56f7c2a21e9e2dbcd158d5c6"} Nov 22 11:26:40 crc kubenswrapper[4926]: I1122 11:26:40.479409 4926 scope.go:117] "RemoveContainer" containerID="2e3f235c8fb6d7bb8e4f924fdc1e4071eb0ee0c8a090058bfe231e5c577e4544" Nov 22 11:29:09 crc kubenswrapper[4926]: I1122 11:29:09.661050 4926 patch_prober.go:28] interesting pod/machine-config-daemon-xr9nd container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 22 11:29:09 crc kubenswrapper[4926]: I1122 11:29:09.663391 4926 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" podUID="d4977b14-85c3-4141-9b15-1768f09e8d27" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 22 11:29:39 crc kubenswrapper[4926]: I1122 11:29:39.661522 4926 patch_prober.go:28] interesting pod/machine-config-daemon-xr9nd container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 22 11:29:39 crc kubenswrapper[4926]: I1122 11:29:39.664011 4926 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" podUID="d4977b14-85c3-4141-9b15-1768f09e8d27" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 22 11:30:00 crc 
kubenswrapper[4926]: I1122 11:30:00.208816 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29396850-h4d7g"] Nov 22 11:30:00 crc kubenswrapper[4926]: E1122 11:30:00.209698 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="15efe56d-d0fd-4233-af3b-e22300698f53" containerName="registry-server" Nov 22 11:30:00 crc kubenswrapper[4926]: I1122 11:30:00.209711 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="15efe56d-d0fd-4233-af3b-e22300698f53" containerName="registry-server" Nov 22 11:30:00 crc kubenswrapper[4926]: E1122 11:30:00.209722 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="15efe56d-d0fd-4233-af3b-e22300698f53" containerName="extract-utilities" Nov 22 11:30:00 crc kubenswrapper[4926]: I1122 11:30:00.209727 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="15efe56d-d0fd-4233-af3b-e22300698f53" containerName="extract-utilities" Nov 22 11:30:00 crc kubenswrapper[4926]: E1122 11:30:00.209748 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="15efe56d-d0fd-4233-af3b-e22300698f53" containerName="extract-content" Nov 22 11:30:00 crc kubenswrapper[4926]: I1122 11:30:00.209755 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="15efe56d-d0fd-4233-af3b-e22300698f53" containerName="extract-content" Nov 22 11:30:00 crc kubenswrapper[4926]: I1122 11:30:00.209969 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="15efe56d-d0fd-4233-af3b-e22300698f53" containerName="registry-server" Nov 22 11:30:00 crc kubenswrapper[4926]: I1122 11:30:00.210575 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29396850-h4d7g" Nov 22 11:30:00 crc kubenswrapper[4926]: I1122 11:30:00.214964 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 22 11:30:00 crc kubenswrapper[4926]: I1122 11:30:00.215363 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 22 11:30:00 crc kubenswrapper[4926]: I1122 11:30:00.232064 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29396850-h4d7g"] Nov 22 11:30:00 crc kubenswrapper[4926]: I1122 11:30:00.251703 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8r4j6\" (UniqueName: \"kubernetes.io/projected/3b546a81-8877-4ad6-994b-a4e39b4914e0-kube-api-access-8r4j6\") pod \"collect-profiles-29396850-h4d7g\" (UID: \"3b546a81-8877-4ad6-994b-a4e39b4914e0\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29396850-h4d7g" Nov 22 11:30:00 crc kubenswrapper[4926]: I1122 11:30:00.251784 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/3b546a81-8877-4ad6-994b-a4e39b4914e0-secret-volume\") pod \"collect-profiles-29396850-h4d7g\" (UID: \"3b546a81-8877-4ad6-994b-a4e39b4914e0\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29396850-h4d7g" Nov 22 11:30:00 crc kubenswrapper[4926]: I1122 11:30:00.252015 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/3b546a81-8877-4ad6-994b-a4e39b4914e0-config-volume\") pod 
\"collect-profiles-29396850-h4d7g\" (UID: \"3b546a81-8877-4ad6-994b-a4e39b4914e0\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29396850-h4d7g" Nov 22 11:30:00 crc kubenswrapper[4926]: I1122 11:30:00.353758 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8r4j6\" (UniqueName: \"kubernetes.io/projected/3b546a81-8877-4ad6-994b-a4e39b4914e0-kube-api-access-8r4j6\") pod \"collect-profiles-29396850-h4d7g\" (UID: \"3b546a81-8877-4ad6-994b-a4e39b4914e0\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29396850-h4d7g" Nov 22 11:30:00 crc kubenswrapper[4926]: I1122 11:30:00.353849 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/3b546a81-8877-4ad6-994b-a4e39b4914e0-secret-volume\") pod \"collect-profiles-29396850-h4d7g\" (UID: \"3b546a81-8877-4ad6-994b-a4e39b4914e0\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29396850-h4d7g" Nov 22 11:30:00 crc kubenswrapper[4926]: I1122 11:30:00.354709 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/3b546a81-8877-4ad6-994b-a4e39b4914e0-config-volume\") pod \"collect-profiles-29396850-h4d7g\" (UID: \"3b546a81-8877-4ad6-994b-a4e39b4914e0\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29396850-h4d7g" Nov 22 11:30:00 crc kubenswrapper[4926]: I1122 11:30:00.355757 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/3b546a81-8877-4ad6-994b-a4e39b4914e0-config-volume\") pod \"collect-profiles-29396850-h4d7g\" (UID: \"3b546a81-8877-4ad6-994b-a4e39b4914e0\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29396850-h4d7g" Nov 22 11:30:00 crc kubenswrapper[4926]: I1122 11:30:00.360027 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/3b546a81-8877-4ad6-994b-a4e39b4914e0-secret-volume\") pod \"collect-profiles-29396850-h4d7g\" (UID: \"3b546a81-8877-4ad6-994b-a4e39b4914e0\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29396850-h4d7g" Nov 22 11:30:00 crc kubenswrapper[4926]: I1122 11:30:00.380857 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8r4j6\" (UniqueName: \"kubernetes.io/projected/3b546a81-8877-4ad6-994b-a4e39b4914e0-kube-api-access-8r4j6\") pod \"collect-profiles-29396850-h4d7g\" (UID: \"3b546a81-8877-4ad6-994b-a4e39b4914e0\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29396850-h4d7g" Nov 22 11:30:00 crc kubenswrapper[4926]: I1122 11:30:00.534937 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29396850-h4d7g" Nov 22 11:30:01 crc kubenswrapper[4926]: I1122 11:30:01.059427 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29396850-h4d7g"] Nov 22 11:30:01 crc kubenswrapper[4926]: I1122 11:30:01.640554 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29396850-h4d7g" event={"ID":"3b546a81-8877-4ad6-994b-a4e39b4914e0","Type":"ContainerStarted","Data":"5dcc49e235cb09ec65b3743b12c4bd943e855a294a264720fa2ef26a354cda31"} Nov 22 11:30:01 crc kubenswrapper[4926]: I1122 11:30:01.641022 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29396850-h4d7g" event={"ID":"3b546a81-8877-4ad6-994b-a4e39b4914e0","Type":"ContainerStarted","Data":"6bab160aba06ebdf4e8082e7f5f46bcdee19bf9923334adb867315fd64271c1c"} Nov 22 11:30:01 crc kubenswrapper[4926]: I1122 11:30:01.671389 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29396850-h4d7g" podStartSLOduration=1.671369565 podStartE2EDuration="1.671369565s" podCreationTimestamp="2025-11-22 11:30:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 11:30:01.661644936 +0000 UTC m=+3021.963250233" watchObservedRunningTime="2025-11-22 11:30:01.671369565 +0000 UTC m=+3021.972974862" Nov 22 11:30:02 crc kubenswrapper[4926]: I1122 11:30:02.655468 4926 generic.go:334] "Generic (PLEG): container finished" podID="3b546a81-8877-4ad6-994b-a4e39b4914e0" containerID="5dcc49e235cb09ec65b3743b12c4bd943e855a294a264720fa2ef26a354cda31" exitCode=0 Nov 22 11:30:02 crc kubenswrapper[4926]: I1122 11:30:02.655571 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29396850-h4d7g" event={"ID":"3b546a81-8877-4ad6-994b-a4e39b4914e0","Type":"ContainerDied","Data":"5dcc49e235cb09ec65b3743b12c4bd943e855a294a264720fa2ef26a354cda31"} Nov 22 11:30:04 crc kubenswrapper[4926]: I1122 11:30:04.126458 4926 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29396850-h4d7g" Nov 22 11:30:04 crc kubenswrapper[4926]: I1122 11:30:04.250493 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/3b546a81-8877-4ad6-994b-a4e39b4914e0-secret-volume\") pod \"3b546a81-8877-4ad6-994b-a4e39b4914e0\" (UID: \"3b546a81-8877-4ad6-994b-a4e39b4914e0\") " Nov 22 11:30:04 crc kubenswrapper[4926]: I1122 11:30:04.250655 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/3b546a81-8877-4ad6-994b-a4e39b4914e0-config-volume\") pod \"3b546a81-8877-4ad6-994b-a4e39b4914e0\" (UID: \"3b546a81-8877-4ad6-994b-a4e39b4914e0\") " Nov 22 11:30:04 crc kubenswrapper[4926]: I1122 11:30:04.250955 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8r4j6\" (UniqueName: \"kubernetes.io/projected/3b546a81-8877-4ad6-994b-a4e39b4914e0-kube-api-access-8r4j6\") pod \"3b546a81-8877-4ad6-994b-a4e39b4914e0\" (UID: \"3b546a81-8877-4ad6-994b-a4e39b4914e0\") " Nov 22 11:30:04 crc kubenswrapper[4926]: I1122 11:30:04.252985 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3b546a81-8877-4ad6-994b-a4e39b4914e0-config-volume" (OuterVolumeSpecName: "config-volume") pod "3b546a81-8877-4ad6-994b-a4e39b4914e0" (UID: "3b546a81-8877-4ad6-994b-a4e39b4914e0"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 11:30:04 crc kubenswrapper[4926]: I1122 11:30:04.257656 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3b546a81-8877-4ad6-994b-a4e39b4914e0-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "3b546a81-8877-4ad6-994b-a4e39b4914e0" (UID: "3b546a81-8877-4ad6-994b-a4e39b4914e0"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 11:30:04 crc kubenswrapper[4926]: I1122 11:30:04.258472 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3b546a81-8877-4ad6-994b-a4e39b4914e0-kube-api-access-8r4j6" (OuterVolumeSpecName: "kube-api-access-8r4j6") pod "3b546a81-8877-4ad6-994b-a4e39b4914e0" (UID: "3b546a81-8877-4ad6-994b-a4e39b4914e0"). InnerVolumeSpecName "kube-api-access-8r4j6". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 11:30:04 crc kubenswrapper[4926]: I1122 11:30:04.353740 4926 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/3b546a81-8877-4ad6-994b-a4e39b4914e0-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 22 11:30:04 crc kubenswrapper[4926]: I1122 11:30:04.354209 4926 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/3b546a81-8877-4ad6-994b-a4e39b4914e0-config-volume\") on node \"crc\" DevicePath \"\"" Nov 22 11:30:04 crc kubenswrapper[4926]: I1122 11:30:04.354230 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8r4j6\" (UniqueName: \"kubernetes.io/projected/3b546a81-8877-4ad6-994b-a4e39b4914e0-kube-api-access-8r4j6\") on node \"crc\" DevicePath \"\"" Nov 22 11:30:04 crc kubenswrapper[4926]: I1122 11:30:04.690868 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29396850-h4d7g" event={"ID":"3b546a81-8877-4ad6-994b-a4e39b4914e0","Type":"ContainerDied","Data":"6bab160aba06ebdf4e8082e7f5f46bcdee19bf9923334adb867315fd64271c1c"} Nov 22 11:30:04 crc kubenswrapper[4926]: I1122 11:30:04.690963 4926 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="6bab160aba06ebdf4e8082e7f5f46bcdee19bf9923334adb867315fd64271c1c" Nov 22 11:30:04 crc kubenswrapper[4926]: I1122 11:30:04.691013 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29396850-h4d7g" Nov 22 11:30:04 crc kubenswrapper[4926]: I1122 11:30:04.762513 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29396805-9nvz2"] Nov 22 11:30:04 crc kubenswrapper[4926]: I1122 11:30:04.774300 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29396805-9nvz2"] Nov 22 11:30:06 crc kubenswrapper[4926]: I1122 11:30:06.597004 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="17576076-3001-49ed-a84d-b691103b01f6" path="/var/lib/kubelet/pods/17576076-3001-49ed-a84d-b691103b01f6/volumes" Nov 22 11:30:09 crc kubenswrapper[4926]: I1122 11:30:09.661506 4926 patch_prober.go:28] interesting pod/machine-config-daemon-xr9nd container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 22 11:30:09 crc kubenswrapper[4926]: I1122 11:30:09.662245 4926 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" podUID="d4977b14-85c3-4141-9b15-1768f09e8d27" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 22 11:30:09 crc kubenswrapper[4926]: I1122 11:30:09.662311 4926 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" Nov 22 11:30:09 crc kubenswrapper[4926]: I1122 11:30:09.663407 4926 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"a2b8252229cd7206a620d1ff730f422361fddf9e56f7c2a21e9e2dbcd158d5c6"} 
pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 22 11:30:09 crc kubenswrapper[4926]: I1122 11:30:09.663533 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" podUID="d4977b14-85c3-4141-9b15-1768f09e8d27" containerName="machine-config-daemon" containerID="cri-o://a2b8252229cd7206a620d1ff730f422361fddf9e56f7c2a21e9e2dbcd158d5c6" gracePeriod=600 Nov 22 11:30:09 crc kubenswrapper[4926]: E1122 11:30:09.786098 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xr9nd_openshift-machine-config-operator(d4977b14-85c3-4141-9b15-1768f09e8d27)\"" pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" podUID="d4977b14-85c3-4141-9b15-1768f09e8d27" Nov 22 11:30:10 crc kubenswrapper[4926]: I1122 11:30:10.764819 4926 generic.go:334] "Generic (PLEG): container finished" podID="d4977b14-85c3-4141-9b15-1768f09e8d27" containerID="a2b8252229cd7206a620d1ff730f422361fddf9e56f7c2a21e9e2dbcd158d5c6" exitCode=0 Nov 22 11:30:10 crc kubenswrapper[4926]: I1122 11:30:10.764866 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" event={"ID":"d4977b14-85c3-4141-9b15-1768f09e8d27","Type":"ContainerDied","Data":"a2b8252229cd7206a620d1ff730f422361fddf9e56f7c2a21e9e2dbcd158d5c6"} Nov 22 11:30:10 crc kubenswrapper[4926]: I1122 11:30:10.765301 4926 scope.go:117] "RemoveContainer" containerID="37ef0851b72d879c7ef8e82174c0ce3461ce067e9c13a134ab7ff49f8a56bf19" Nov 22 11:30:10 crc kubenswrapper[4926]: I1122 11:30:10.766376 4926 scope.go:117] "RemoveContainer" containerID="a2b8252229cd7206a620d1ff730f422361fddf9e56f7c2a21e9e2dbcd158d5c6" Nov 22 11:30:10 crc kubenswrapper[4926]: E1122 11:30:10.767009 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xr9nd_openshift-machine-config-operator(d4977b14-85c3-4141-9b15-1768f09e8d27)\"" pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" podUID="d4977b14-85c3-4141-9b15-1768f09e8d27" Nov 22 11:30:22 crc kubenswrapper[4926]: I1122 11:30:22.552697 4926 scope.go:117] "RemoveContainer" containerID="9ea63c375f1834940d7f3e3966c602507e7e88e4a0e00624092fb00c53e59759" Nov 22 11:30:25 crc kubenswrapper[4926]: I1122 11:30:25.582519 4926 scope.go:117] "RemoveContainer" containerID="a2b8252229cd7206a620d1ff730f422361fddf9e56f7c2a21e9e2dbcd158d5c6" Nov 22 11:30:25 crc kubenswrapper[4926]: E1122 11:30:25.583114 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xr9nd_openshift-machine-config-operator(d4977b14-85c3-4141-9b15-1768f09e8d27)\"" pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" podUID="d4977b14-85c3-4141-9b15-1768f09e8d27" Nov 22 11:30:37 crc kubenswrapper[4926]: I1122 11:30:37.582460 4926 scope.go:117] "RemoveContainer" containerID="a2b8252229cd7206a620d1ff730f422361fddf9e56f7c2a21e9e2dbcd158d5c6" Nov 22 11:30:37 crc 
kubenswrapper[4926]: E1122 11:30:37.583483 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xr9nd_openshift-machine-config-operator(d4977b14-85c3-4141-9b15-1768f09e8d27)\"" pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" podUID="d4977b14-85c3-4141-9b15-1768f09e8d27" Nov 22 11:30:49 crc kubenswrapper[4926]: I1122 11:30:49.582354 4926 scope.go:117] "RemoveContainer" containerID="a2b8252229cd7206a620d1ff730f422361fddf9e56f7c2a21e9e2dbcd158d5c6" Nov 22 11:30:49 crc kubenswrapper[4926]: E1122 11:30:49.583062 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xr9nd_openshift-machine-config-operator(d4977b14-85c3-4141-9b15-1768f09e8d27)\"" pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" podUID="d4977b14-85c3-4141-9b15-1768f09e8d27" Nov 22 11:31:00 crc kubenswrapper[4926]: I1122 11:31:00.596648 4926 scope.go:117] "RemoveContainer" containerID="a2b8252229cd7206a620d1ff730f422361fddf9e56f7c2a21e9e2dbcd158d5c6" Nov 22 11:31:00 crc kubenswrapper[4926]: E1122 11:31:00.597508 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xr9nd_openshift-machine-config-operator(d4977b14-85c3-4141-9b15-1768f09e8d27)\"" pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" podUID="d4977b14-85c3-4141-9b15-1768f09e8d27" Nov 22 11:31:13 crc kubenswrapper[4926]: I1122 11:31:13.582303 4926 scope.go:117] "RemoveContainer" containerID="a2b8252229cd7206a620d1ff730f422361fddf9e56f7c2a21e9e2dbcd158d5c6" Nov 22 11:31:13 crc kubenswrapper[4926]: E1122 11:31:13.583151 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xr9nd_openshift-machine-config-operator(d4977b14-85c3-4141-9b15-1768f09e8d27)\"" pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" podUID="d4977b14-85c3-4141-9b15-1768f09e8d27" Nov 22 11:31:25 crc kubenswrapper[4926]: I1122 11:31:25.583400 4926 scope.go:117] "RemoveContainer" containerID="a2b8252229cd7206a620d1ff730f422361fddf9e56f7c2a21e9e2dbcd158d5c6" Nov 22 11:31:25 crc kubenswrapper[4926]: E1122 11:31:25.584951 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xr9nd_openshift-machine-config-operator(d4977b14-85c3-4141-9b15-1768f09e8d27)\"" pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" podUID="d4977b14-85c3-4141-9b15-1768f09e8d27" Nov 22 11:31:30 crc kubenswrapper[4926]: I1122 11:31:30.291577 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-hfxdk"] Nov 22 11:31:30 crc kubenswrapper[4926]: E1122 11:31:30.293333 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3b546a81-8877-4ad6-994b-a4e39b4914e0" containerName="collect-profiles" Nov 22 11:31:30 
crc kubenswrapper[4926]: I1122 11:31:30.293449 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="3b546a81-8877-4ad6-994b-a4e39b4914e0" containerName="collect-profiles" Nov 22 11:31:30 crc kubenswrapper[4926]: I1122 11:31:30.293756 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="3b546a81-8877-4ad6-994b-a4e39b4914e0" containerName="collect-profiles" Nov 22 11:31:30 crc kubenswrapper[4926]: I1122 11:31:30.297238 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-hfxdk" Nov 22 11:31:30 crc kubenswrapper[4926]: I1122 11:31:30.312932 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-hfxdk"] Nov 22 11:31:30 crc kubenswrapper[4926]: I1122 11:31:30.455197 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6f769723-ecc6-4cb7-821f-977c9d3f4dfb-catalog-content\") pod \"redhat-marketplace-hfxdk\" (UID: \"6f769723-ecc6-4cb7-821f-977c9d3f4dfb\") " pod="openshift-marketplace/redhat-marketplace-hfxdk" Nov 22 11:31:30 crc kubenswrapper[4926]: I1122 11:31:30.455826 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6f769723-ecc6-4cb7-821f-977c9d3f4dfb-utilities\") pod \"redhat-marketplace-hfxdk\" (UID: \"6f769723-ecc6-4cb7-821f-977c9d3f4dfb\") " pod="openshift-marketplace/redhat-marketplace-hfxdk" Nov 22 11:31:30 crc kubenswrapper[4926]: I1122 11:31:30.456204 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-79jls\" (UniqueName: \"kubernetes.io/projected/6f769723-ecc6-4cb7-821f-977c9d3f4dfb-kube-api-access-79jls\") pod \"redhat-marketplace-hfxdk\" (UID: \"6f769723-ecc6-4cb7-821f-977c9d3f4dfb\") " pod="openshift-marketplace/redhat-marketplace-hfxdk" Nov 22 11:31:30 crc kubenswrapper[4926]: I1122 11:31:30.558444 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6f769723-ecc6-4cb7-821f-977c9d3f4dfb-utilities\") pod \"redhat-marketplace-hfxdk\" (UID: \"6f769723-ecc6-4cb7-821f-977c9d3f4dfb\") " pod="openshift-marketplace/redhat-marketplace-hfxdk" Nov 22 11:31:30 crc kubenswrapper[4926]: I1122 11:31:30.558519 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-79jls\" (UniqueName: \"kubernetes.io/projected/6f769723-ecc6-4cb7-821f-977c9d3f4dfb-kube-api-access-79jls\") pod \"redhat-marketplace-hfxdk\" (UID: \"6f769723-ecc6-4cb7-821f-977c9d3f4dfb\") " pod="openshift-marketplace/redhat-marketplace-hfxdk" Nov 22 11:31:30 crc kubenswrapper[4926]: I1122 11:31:30.558565 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6f769723-ecc6-4cb7-821f-977c9d3f4dfb-catalog-content\") pod \"redhat-marketplace-hfxdk\" (UID: \"6f769723-ecc6-4cb7-821f-977c9d3f4dfb\") " pod="openshift-marketplace/redhat-marketplace-hfxdk" Nov 22 11:31:30 crc kubenswrapper[4926]: I1122 11:31:30.559045 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6f769723-ecc6-4cb7-821f-977c9d3f4dfb-catalog-content\") pod \"redhat-marketplace-hfxdk\" (UID: \"6f769723-ecc6-4cb7-821f-977c9d3f4dfb\") " 
pod="openshift-marketplace/redhat-marketplace-hfxdk" Nov 22 11:31:30 crc kubenswrapper[4926]: I1122 11:31:30.559341 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6f769723-ecc6-4cb7-821f-977c9d3f4dfb-utilities\") pod \"redhat-marketplace-hfxdk\" (UID: \"6f769723-ecc6-4cb7-821f-977c9d3f4dfb\") " pod="openshift-marketplace/redhat-marketplace-hfxdk" Nov 22 11:31:30 crc kubenswrapper[4926]: I1122 11:31:30.594304 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-79jls\" (UniqueName: \"kubernetes.io/projected/6f769723-ecc6-4cb7-821f-977c9d3f4dfb-kube-api-access-79jls\") pod \"redhat-marketplace-hfxdk\" (UID: \"6f769723-ecc6-4cb7-821f-977c9d3f4dfb\") " pod="openshift-marketplace/redhat-marketplace-hfxdk" Nov 22 11:31:30 crc kubenswrapper[4926]: I1122 11:31:30.644197 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-hfxdk" Nov 22 11:31:31 crc kubenswrapper[4926]: I1122 11:31:31.201088 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-hfxdk"] Nov 22 11:31:31 crc kubenswrapper[4926]: I1122 11:31:31.690938 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-7jx9m"] Nov 22 11:31:31 crc kubenswrapper[4926]: I1122 11:31:31.693910 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-7jx9m" Nov 22 11:31:31 crc kubenswrapper[4926]: I1122 11:31:31.719641 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-7jx9m"] Nov 22 11:31:31 crc kubenswrapper[4926]: I1122 11:31:31.732057 4926 generic.go:334] "Generic (PLEG): container finished" podID="6f769723-ecc6-4cb7-821f-977c9d3f4dfb" containerID="b0b74b5663a56ed0be17f7f9b46c8de3e4cdfd1c14d41e30fb5c30dc0ec5f1fe" exitCode=0 Nov 22 11:31:31 crc kubenswrapper[4926]: I1122 11:31:31.732123 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-hfxdk" event={"ID":"6f769723-ecc6-4cb7-821f-977c9d3f4dfb","Type":"ContainerDied","Data":"b0b74b5663a56ed0be17f7f9b46c8de3e4cdfd1c14d41e30fb5c30dc0ec5f1fe"} Nov 22 11:31:31 crc kubenswrapper[4926]: I1122 11:31:31.732168 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-hfxdk" event={"ID":"6f769723-ecc6-4cb7-821f-977c9d3f4dfb","Type":"ContainerStarted","Data":"9a56fa47606aaaffc5784b22e40cb6e413f8bcc469602eb871b6b9a0bda09c53"} Nov 22 11:31:31 crc kubenswrapper[4926]: I1122 11:31:31.735188 4926 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 22 11:31:31 crc kubenswrapper[4926]: I1122 11:31:31.786266 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/48f1ec9f-6654-4554-92a1-616623822964-catalog-content\") pod \"redhat-operators-7jx9m\" (UID: \"48f1ec9f-6654-4554-92a1-616623822964\") " pod="openshift-marketplace/redhat-operators-7jx9m" Nov 22 11:31:31 crc kubenswrapper[4926]: I1122 11:31:31.786330 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g9bmc\" (UniqueName: \"kubernetes.io/projected/48f1ec9f-6654-4554-92a1-616623822964-kube-api-access-g9bmc\") pod \"redhat-operators-7jx9m\" (UID: 
\"48f1ec9f-6654-4554-92a1-616623822964\") " pod="openshift-marketplace/redhat-operators-7jx9m" Nov 22 11:31:31 crc kubenswrapper[4926]: I1122 11:31:31.786388 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/48f1ec9f-6654-4554-92a1-616623822964-utilities\") pod \"redhat-operators-7jx9m\" (UID: \"48f1ec9f-6654-4554-92a1-616623822964\") " pod="openshift-marketplace/redhat-operators-7jx9m" Nov 22 11:31:31 crc kubenswrapper[4926]: I1122 11:31:31.888380 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/48f1ec9f-6654-4554-92a1-616623822964-utilities\") pod \"redhat-operators-7jx9m\" (UID: \"48f1ec9f-6654-4554-92a1-616623822964\") " pod="openshift-marketplace/redhat-operators-7jx9m" Nov 22 11:31:31 crc kubenswrapper[4926]: I1122 11:31:31.888610 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/48f1ec9f-6654-4554-92a1-616623822964-catalog-content\") pod \"redhat-operators-7jx9m\" (UID: \"48f1ec9f-6654-4554-92a1-616623822964\") " pod="openshift-marketplace/redhat-operators-7jx9m" Nov 22 11:31:31 crc kubenswrapper[4926]: I1122 11:31:31.888719 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g9bmc\" (UniqueName: \"kubernetes.io/projected/48f1ec9f-6654-4554-92a1-616623822964-kube-api-access-g9bmc\") pod \"redhat-operators-7jx9m\" (UID: \"48f1ec9f-6654-4554-92a1-616623822964\") " pod="openshift-marketplace/redhat-operators-7jx9m" Nov 22 11:31:31 crc kubenswrapper[4926]: I1122 11:31:31.888810 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/48f1ec9f-6654-4554-92a1-616623822964-utilities\") pod \"redhat-operators-7jx9m\" (UID: \"48f1ec9f-6654-4554-92a1-616623822964\") " pod="openshift-marketplace/redhat-operators-7jx9m" Nov 22 11:31:31 crc kubenswrapper[4926]: I1122 11:31:31.889292 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/48f1ec9f-6654-4554-92a1-616623822964-catalog-content\") pod \"redhat-operators-7jx9m\" (UID: \"48f1ec9f-6654-4554-92a1-616623822964\") " pod="openshift-marketplace/redhat-operators-7jx9m" Nov 22 11:31:31 crc kubenswrapper[4926]: I1122 11:31:31.913954 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g9bmc\" (UniqueName: \"kubernetes.io/projected/48f1ec9f-6654-4554-92a1-616623822964-kube-api-access-g9bmc\") pod \"redhat-operators-7jx9m\" (UID: \"48f1ec9f-6654-4554-92a1-616623822964\") " pod="openshift-marketplace/redhat-operators-7jx9m" Nov 22 11:31:32 crc kubenswrapper[4926]: I1122 11:31:32.019612 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-7jx9m" Nov 22 11:31:32 crc kubenswrapper[4926]: W1122 11:31:32.532592 4926 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod48f1ec9f_6654_4554_92a1_616623822964.slice/crio-e6f52f899553e5449042151d42ab2c8e357c501b2bbcd4ba0a36713348165fee WatchSource:0}: Error finding container e6f52f899553e5449042151d42ab2c8e357c501b2bbcd4ba0a36713348165fee: Status 404 returned error can't find the container with id e6f52f899553e5449042151d42ab2c8e357c501b2bbcd4ba0a36713348165fee Nov 22 11:31:32 crc kubenswrapper[4926]: I1122 11:31:32.545932 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-7jx9m"] Nov 22 11:31:32 crc kubenswrapper[4926]: I1122 11:31:32.686387 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-7f59b"] Nov 22 11:31:32 crc kubenswrapper[4926]: I1122 11:31:32.688318 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-7f59b" Nov 22 11:31:32 crc kubenswrapper[4926]: I1122 11:31:32.704033 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-7f59b"] Nov 22 11:31:32 crc kubenswrapper[4926]: I1122 11:31:32.750952 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-7jx9m" event={"ID":"48f1ec9f-6654-4554-92a1-616623822964","Type":"ContainerStarted","Data":"e6f52f899553e5449042151d42ab2c8e357c501b2bbcd4ba0a36713348165fee"} Nov 22 11:31:32 crc kubenswrapper[4926]: I1122 11:31:32.753771 4926 generic.go:334] "Generic (PLEG): container finished" podID="6f769723-ecc6-4cb7-821f-977c9d3f4dfb" containerID="db378ef331198b9a08a480677593f3ece7b641f63895697c6f948cfec0deba36" exitCode=0 Nov 22 11:31:32 crc kubenswrapper[4926]: I1122 11:31:32.753821 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-hfxdk" event={"ID":"6f769723-ecc6-4cb7-821f-977c9d3f4dfb","Type":"ContainerDied","Data":"db378ef331198b9a08a480677593f3ece7b641f63895697c6f948cfec0deba36"} Nov 22 11:31:32 crc kubenswrapper[4926]: I1122 11:31:32.813616 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b3febd1d-c011-4b2a-93bf-4ab2635ec127-utilities\") pod \"community-operators-7f59b\" (UID: \"b3febd1d-c011-4b2a-93bf-4ab2635ec127\") " pod="openshift-marketplace/community-operators-7f59b" Nov 22 11:31:32 crc kubenswrapper[4926]: I1122 11:31:32.813860 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-clvq8\" (UniqueName: \"kubernetes.io/projected/b3febd1d-c011-4b2a-93bf-4ab2635ec127-kube-api-access-clvq8\") pod \"community-operators-7f59b\" (UID: \"b3febd1d-c011-4b2a-93bf-4ab2635ec127\") " pod="openshift-marketplace/community-operators-7f59b" Nov 22 11:31:32 crc kubenswrapper[4926]: I1122 11:31:32.813950 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b3febd1d-c011-4b2a-93bf-4ab2635ec127-catalog-content\") pod \"community-operators-7f59b\" (UID: \"b3febd1d-c011-4b2a-93bf-4ab2635ec127\") " pod="openshift-marketplace/community-operators-7f59b" Nov 22 11:31:32 crc kubenswrapper[4926]: I1122 11:31:32.915567 4926 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-clvq8\" (UniqueName: \"kubernetes.io/projected/b3febd1d-c011-4b2a-93bf-4ab2635ec127-kube-api-access-clvq8\") pod \"community-operators-7f59b\" (UID: \"b3febd1d-c011-4b2a-93bf-4ab2635ec127\") " pod="openshift-marketplace/community-operators-7f59b" Nov 22 11:31:32 crc kubenswrapper[4926]: I1122 11:31:32.915649 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b3febd1d-c011-4b2a-93bf-4ab2635ec127-catalog-content\") pod \"community-operators-7f59b\" (UID: \"b3febd1d-c011-4b2a-93bf-4ab2635ec127\") " pod="openshift-marketplace/community-operators-7f59b" Nov 22 11:31:32 crc kubenswrapper[4926]: I1122 11:31:32.915669 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b3febd1d-c011-4b2a-93bf-4ab2635ec127-utilities\") pod \"community-operators-7f59b\" (UID: \"b3febd1d-c011-4b2a-93bf-4ab2635ec127\") " pod="openshift-marketplace/community-operators-7f59b" Nov 22 11:31:32 crc kubenswrapper[4926]: I1122 11:31:32.916420 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b3febd1d-c011-4b2a-93bf-4ab2635ec127-catalog-content\") pod \"community-operators-7f59b\" (UID: \"b3febd1d-c011-4b2a-93bf-4ab2635ec127\") " pod="openshift-marketplace/community-operators-7f59b" Nov 22 11:31:32 crc kubenswrapper[4926]: I1122 11:31:32.917108 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b3febd1d-c011-4b2a-93bf-4ab2635ec127-utilities\") pod \"community-operators-7f59b\" (UID: \"b3febd1d-c011-4b2a-93bf-4ab2635ec127\") " pod="openshift-marketplace/community-operators-7f59b" Nov 22 11:31:32 crc kubenswrapper[4926]: I1122 11:31:32.939670 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-clvq8\" (UniqueName: \"kubernetes.io/projected/b3febd1d-c011-4b2a-93bf-4ab2635ec127-kube-api-access-clvq8\") pod \"community-operators-7f59b\" (UID: \"b3febd1d-c011-4b2a-93bf-4ab2635ec127\") " pod="openshift-marketplace/community-operators-7f59b" Nov 22 11:31:33 crc kubenswrapper[4926]: I1122 11:31:33.027767 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-7f59b" Nov 22 11:31:33 crc kubenswrapper[4926]: I1122 11:31:33.583540 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-7f59b"] Nov 22 11:31:33 crc kubenswrapper[4926]: W1122 11:31:33.589855 4926 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb3febd1d_c011_4b2a_93bf_4ab2635ec127.slice/crio-d6f3ee2a16add1d5f8c88a031484549cdc9af958db709033c402221ca78e3d6a WatchSource:0}: Error finding container d6f3ee2a16add1d5f8c88a031484549cdc9af958db709033c402221ca78e3d6a: Status 404 returned error can't find the container with id d6f3ee2a16add1d5f8c88a031484549cdc9af958db709033c402221ca78e3d6a Nov 22 11:31:33 crc kubenswrapper[4926]: I1122 11:31:33.764635 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-7f59b" event={"ID":"b3febd1d-c011-4b2a-93bf-4ab2635ec127","Type":"ContainerStarted","Data":"d6f3ee2a16add1d5f8c88a031484549cdc9af958db709033c402221ca78e3d6a"} Nov 22 11:31:33 crc kubenswrapper[4926]: I1122 11:31:33.769567 4926 generic.go:334] "Generic (PLEG): container finished" podID="48f1ec9f-6654-4554-92a1-616623822964" containerID="f2c608bf835749b95d3c6168fab8758cade7b27334c66b081887e318c1eb3231" exitCode=0 Nov 22 11:31:33 crc kubenswrapper[4926]: I1122 11:31:33.769631 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-7jx9m" event={"ID":"48f1ec9f-6654-4554-92a1-616623822964","Type":"ContainerDied","Data":"f2c608bf835749b95d3c6168fab8758cade7b27334c66b081887e318c1eb3231"} Nov 22 11:31:33 crc kubenswrapper[4926]: I1122 11:31:33.775503 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-hfxdk" event={"ID":"6f769723-ecc6-4cb7-821f-977c9d3f4dfb","Type":"ContainerStarted","Data":"8bc150ce2d6b6bb26a195632b2b09d6f674df72091ebf1c0fd196def751a583c"} Nov 22 11:31:33 crc kubenswrapper[4926]: I1122 11:31:33.821378 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-hfxdk" podStartSLOduration=2.372809873 podStartE2EDuration="3.821358949s" podCreationTimestamp="2025-11-22 11:31:30 +0000 UTC" firstStartedPulling="2025-11-22 11:31:31.734879716 +0000 UTC m=+3112.036484993" lastFinishedPulling="2025-11-22 11:31:33.183428782 +0000 UTC m=+3113.485034069" observedRunningTime="2025-11-22 11:31:33.820850934 +0000 UTC m=+3114.122456211" watchObservedRunningTime="2025-11-22 11:31:33.821358949 +0000 UTC m=+3114.122964236" Nov 22 11:31:34 crc kubenswrapper[4926]: I1122 11:31:34.789201 4926 generic.go:334] "Generic (PLEG): container finished" podID="b3febd1d-c011-4b2a-93bf-4ab2635ec127" containerID="693345290afbb593fe95adfbfaf37e4a219776063acc14ae519bd02d99de7000" exitCode=0 Nov 22 11:31:34 crc kubenswrapper[4926]: I1122 11:31:34.789310 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-7f59b" event={"ID":"b3febd1d-c011-4b2a-93bf-4ab2635ec127","Type":"ContainerDied","Data":"693345290afbb593fe95adfbfaf37e4a219776063acc14ae519bd02d99de7000"} Nov 22 11:31:34 crc kubenswrapper[4926]: I1122 11:31:34.798214 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-7jx9m" event={"ID":"48f1ec9f-6654-4554-92a1-616623822964","Type":"ContainerStarted","Data":"25c9c6bcca79b788c71eed3e4f97bdd405d70d63b7c559b9caf22fb4e37a6a9a"} Nov 22 
11:31:36 crc kubenswrapper[4926]: I1122 11:31:36.834011 4926 generic.go:334] "Generic (PLEG): container finished" podID="b3febd1d-c011-4b2a-93bf-4ab2635ec127" containerID="2eb0b3c23a5a310173207a81880d9c814f6410015db1150e05a3e38f512506ce" exitCode=0 Nov 22 11:31:36 crc kubenswrapper[4926]: I1122 11:31:36.834174 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-7f59b" event={"ID":"b3febd1d-c011-4b2a-93bf-4ab2635ec127","Type":"ContainerDied","Data":"2eb0b3c23a5a310173207a81880d9c814f6410015db1150e05a3e38f512506ce"} Nov 22 11:31:36 crc kubenswrapper[4926]: I1122 11:31:36.838261 4926 generic.go:334] "Generic (PLEG): container finished" podID="48f1ec9f-6654-4554-92a1-616623822964" containerID="25c9c6bcca79b788c71eed3e4f97bdd405d70d63b7c559b9caf22fb4e37a6a9a" exitCode=0 Nov 22 11:31:36 crc kubenswrapper[4926]: I1122 11:31:36.838309 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-7jx9m" event={"ID":"48f1ec9f-6654-4554-92a1-616623822964","Type":"ContainerDied","Data":"25c9c6bcca79b788c71eed3e4f97bdd405d70d63b7c559b9caf22fb4e37a6a9a"} Nov 22 11:31:38 crc kubenswrapper[4926]: I1122 11:31:38.868957 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-7f59b" event={"ID":"b3febd1d-c011-4b2a-93bf-4ab2635ec127","Type":"ContainerStarted","Data":"d479cf2ecf43fbd4be51f2a591f096c8075f7058bd84ce365970e84d1fbbc9aa"} Nov 22 11:31:38 crc kubenswrapper[4926]: I1122 11:31:38.872272 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-7jx9m" event={"ID":"48f1ec9f-6654-4554-92a1-616623822964","Type":"ContainerStarted","Data":"fc96ab22353aba46d0d088ed481bf77876ac3025d17ac427ffc5e8a2e78c25f7"} Nov 22 11:31:38 crc kubenswrapper[4926]: I1122 11:31:38.905433 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-7f59b" podStartSLOduration=4.059604235 podStartE2EDuration="6.905404756s" podCreationTimestamp="2025-11-22 11:31:32 +0000 UTC" firstStartedPulling="2025-11-22 11:31:34.795296763 +0000 UTC m=+3115.096902090" lastFinishedPulling="2025-11-22 11:31:37.641097324 +0000 UTC m=+3117.942702611" observedRunningTime="2025-11-22 11:31:38.895798931 +0000 UTC m=+3119.197404228" watchObservedRunningTime="2025-11-22 11:31:38.905404756 +0000 UTC m=+3119.207010043" Nov 22 11:31:38 crc kubenswrapper[4926]: I1122 11:31:38.918270 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-7jx9m" podStartSLOduration=3.997662137 podStartE2EDuration="7.918244544s" podCreationTimestamp="2025-11-22 11:31:31 +0000 UTC" firstStartedPulling="2025-11-22 11:31:33.775459553 +0000 UTC m=+3114.077064850" lastFinishedPulling="2025-11-22 11:31:37.69604195 +0000 UTC m=+3117.997647257" observedRunningTime="2025-11-22 11:31:38.909466433 +0000 UTC m=+3119.211071710" watchObservedRunningTime="2025-11-22 11:31:38.918244544 +0000 UTC m=+3119.219849831" Nov 22 11:31:40 crc kubenswrapper[4926]: I1122 11:31:40.590227 4926 scope.go:117] "RemoveContainer" containerID="a2b8252229cd7206a620d1ff730f422361fddf9e56f7c2a21e9e2dbcd158d5c6" Nov 22 11:31:40 crc kubenswrapper[4926]: E1122 11:31:40.592263 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
Nov 22 11:31:40 crc kubenswrapper[4926]: I1122 11:31:40.645088 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-hfxdk"
Nov 22 11:31:40 crc kubenswrapper[4926]: I1122 11:31:40.645170 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-hfxdk"
Nov 22 11:31:40 crc kubenswrapper[4926]: I1122 11:31:40.721396 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-hfxdk"
Nov 22 11:31:40 crc kubenswrapper[4926]: I1122 11:31:40.945284 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-hfxdk"
Nov 22 11:31:42 crc kubenswrapper[4926]: I1122 11:31:42.020299 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-7jx9m"
Nov 22 11:31:42 crc kubenswrapper[4926]: I1122 11:31:42.020379 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-7jx9m"
Nov 22 11:31:43 crc kubenswrapper[4926]: I1122 11:31:43.028368 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-7f59b"
Nov 22 11:31:43 crc kubenswrapper[4926]: I1122 11:31:43.028908 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-7f59b"
Nov 22 11:31:43 crc kubenswrapper[4926]: I1122 11:31:43.086468 4926 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-7jx9m" podUID="48f1ec9f-6654-4554-92a1-616623822964" containerName="registry-server" probeResult="failure" output=<
Nov 22 11:31:43 crc kubenswrapper[4926]: timeout: failed to connect service ":50051" within 1s
Nov 22 11:31:43 crc kubenswrapper[4926]: >
Nov 22 11:31:43 crc kubenswrapper[4926]: I1122 11:31:43.096996 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-7f59b"
Nov 22 11:31:43 crc kubenswrapper[4926]: I1122 11:31:43.711424 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-hfxdk"]
Nov 22 11:31:43 crc kubenswrapper[4926]: I1122 11:31:43.711634 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-hfxdk" podUID="6f769723-ecc6-4cb7-821f-977c9d3f4dfb" containerName="registry-server" containerID="cri-o://8bc150ce2d6b6bb26a195632b2b09d6f674df72091ebf1c0fd196def751a583c" gracePeriod=2
Nov 22 11:31:43 crc kubenswrapper[4926]: I1122 11:31:43.926315 4926 generic.go:334] "Generic (PLEG): container finished" podID="6f769723-ecc6-4cb7-821f-977c9d3f4dfb" containerID="8bc150ce2d6b6bb26a195632b2b09d6f674df72091ebf1c0fd196def751a583c" exitCode=0
Nov 22 11:31:43 crc kubenswrapper[4926]: I1122 11:31:43.926484 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-hfxdk" event={"ID":"6f769723-ecc6-4cb7-821f-977c9d3f4dfb","Type":"ContainerDied","Data":"8bc150ce2d6b6bb26a195632b2b09d6f674df72091ebf1c0fd196def751a583c"}
Nov 22 11:31:43 crc kubenswrapper[4926]: I1122 11:31:43.994016 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-7f59b"
Nov 22 11:31:44 crc kubenswrapper[4926]: I1122 11:31:44.218585 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-hfxdk"
Nov 22 11:31:44 crc kubenswrapper[4926]: I1122 11:31:44.270932 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-79jls\" (UniqueName: \"kubernetes.io/projected/6f769723-ecc6-4cb7-821f-977c9d3f4dfb-kube-api-access-79jls\") pod \"6f769723-ecc6-4cb7-821f-977c9d3f4dfb\" (UID: \"6f769723-ecc6-4cb7-821f-977c9d3f4dfb\") "
Nov 22 11:31:44 crc kubenswrapper[4926]: I1122 11:31:44.271152 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6f769723-ecc6-4cb7-821f-977c9d3f4dfb-utilities\") pod \"6f769723-ecc6-4cb7-821f-977c9d3f4dfb\" (UID: \"6f769723-ecc6-4cb7-821f-977c9d3f4dfb\") "
Nov 22 11:31:44 crc kubenswrapper[4926]: I1122 11:31:44.271239 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6f769723-ecc6-4cb7-821f-977c9d3f4dfb-catalog-content\") pod \"6f769723-ecc6-4cb7-821f-977c9d3f4dfb\" (UID: \"6f769723-ecc6-4cb7-821f-977c9d3f4dfb\") "
Nov 22 11:31:44 crc kubenswrapper[4926]: I1122 11:31:44.271946 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6f769723-ecc6-4cb7-821f-977c9d3f4dfb-utilities" (OuterVolumeSpecName: "utilities") pod "6f769723-ecc6-4cb7-821f-977c9d3f4dfb" (UID: "6f769723-ecc6-4cb7-821f-977c9d3f4dfb"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 22 11:31:44 crc kubenswrapper[4926]: I1122 11:31:44.279989 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6f769723-ecc6-4cb7-821f-977c9d3f4dfb-kube-api-access-79jls" (OuterVolumeSpecName: "kube-api-access-79jls") pod "6f769723-ecc6-4cb7-821f-977c9d3f4dfb" (UID: "6f769723-ecc6-4cb7-821f-977c9d3f4dfb"). InnerVolumeSpecName "kube-api-access-79jls". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 22 11:31:44 crc kubenswrapper[4926]: I1122 11:31:44.287392 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6f769723-ecc6-4cb7-821f-977c9d3f4dfb-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "6f769723-ecc6-4cb7-821f-977c9d3f4dfb" (UID: "6f769723-ecc6-4cb7-821f-977c9d3f4dfb"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 22 11:31:44 crc kubenswrapper[4926]: I1122 11:31:44.373949 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-79jls\" (UniqueName: \"kubernetes.io/projected/6f769723-ecc6-4cb7-821f-977c9d3f4dfb-kube-api-access-79jls\") on node \"crc\" DevicePath \"\""
Nov 22 11:31:44 crc kubenswrapper[4926]: I1122 11:31:44.373999 4926 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6f769723-ecc6-4cb7-821f-977c9d3f4dfb-utilities\") on node \"crc\" DevicePath \"\""
Nov 22 11:31:44 crc kubenswrapper[4926]: I1122 11:31:44.374018 4926 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6f769723-ecc6-4cb7-821f-977c9d3f4dfb-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 22 11:31:44 crc kubenswrapper[4926]: I1122 11:31:44.938162 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-hfxdk" event={"ID":"6f769723-ecc6-4cb7-821f-977c9d3f4dfb","Type":"ContainerDied","Data":"9a56fa47606aaaffc5784b22e40cb6e413f8bcc469602eb871b6b9a0bda09c53"}
Nov 22 11:31:44 crc kubenswrapper[4926]: I1122 11:31:44.938242 4926 scope.go:117] "RemoveContainer" containerID="8bc150ce2d6b6bb26a195632b2b09d6f674df72091ebf1c0fd196def751a583c"
Nov 22 11:31:44 crc kubenswrapper[4926]: I1122 11:31:44.938176 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-hfxdk"
Nov 22 11:31:44 crc kubenswrapper[4926]: I1122 11:31:44.970145 4926 scope.go:117] "RemoveContainer" containerID="db378ef331198b9a08a480677593f3ece7b641f63895697c6f948cfec0deba36"
Nov 22 11:31:44 crc kubenswrapper[4926]: I1122 11:31:44.971863 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-hfxdk"]
Nov 22 11:31:44 crc kubenswrapper[4926]: I1122 11:31:44.982213 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-hfxdk"]
Nov 22 11:31:44 crc kubenswrapper[4926]: I1122 11:31:44.998434 4926 scope.go:117] "RemoveContainer" containerID="b0b74b5663a56ed0be17f7f9b46c8de3e4cdfd1c14d41e30fb5c30dc0ec5f1fe"
Nov 22 11:31:45 crc kubenswrapper[4926]: I1122 11:31:45.472434 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-7f59b"]
Nov 22 11:31:45 crc kubenswrapper[4926]: I1122 11:31:45.946180 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-7f59b" podUID="b3febd1d-c011-4b2a-93bf-4ab2635ec127" containerName="registry-server" containerID="cri-o://d479cf2ecf43fbd4be51f2a591f096c8075f7058bd84ce365970e84d1fbbc9aa" gracePeriod=2
Nov 22 11:31:46 crc kubenswrapper[4926]: I1122 11:31:46.495475 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-7f59b"
Need to start a new one" pod="openshift-marketplace/community-operators-7f59b" Nov 22 11:31:46 crc kubenswrapper[4926]: I1122 11:31:46.596320 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6f769723-ecc6-4cb7-821f-977c9d3f4dfb" path="/var/lib/kubelet/pods/6f769723-ecc6-4cb7-821f-977c9d3f4dfb/volumes" Nov 22 11:31:46 crc kubenswrapper[4926]: I1122 11:31:46.627627 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b3febd1d-c011-4b2a-93bf-4ab2635ec127-utilities\") pod \"b3febd1d-c011-4b2a-93bf-4ab2635ec127\" (UID: \"b3febd1d-c011-4b2a-93bf-4ab2635ec127\") " Nov 22 11:31:46 crc kubenswrapper[4926]: I1122 11:31:46.628060 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b3febd1d-c011-4b2a-93bf-4ab2635ec127-catalog-content\") pod \"b3febd1d-c011-4b2a-93bf-4ab2635ec127\" (UID: \"b3febd1d-c011-4b2a-93bf-4ab2635ec127\") " Nov 22 11:31:46 crc kubenswrapper[4926]: I1122 11:31:46.628177 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-clvq8\" (UniqueName: \"kubernetes.io/projected/b3febd1d-c011-4b2a-93bf-4ab2635ec127-kube-api-access-clvq8\") pod \"b3febd1d-c011-4b2a-93bf-4ab2635ec127\" (UID: \"b3febd1d-c011-4b2a-93bf-4ab2635ec127\") " Nov 22 11:31:46 crc kubenswrapper[4926]: I1122 11:31:46.628619 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b3febd1d-c011-4b2a-93bf-4ab2635ec127-utilities" (OuterVolumeSpecName: "utilities") pod "b3febd1d-c011-4b2a-93bf-4ab2635ec127" (UID: "b3febd1d-c011-4b2a-93bf-4ab2635ec127"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 11:31:46 crc kubenswrapper[4926]: I1122 11:31:46.629486 4926 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b3febd1d-c011-4b2a-93bf-4ab2635ec127-utilities\") on node \"crc\" DevicePath \"\"" Nov 22 11:31:46 crc kubenswrapper[4926]: I1122 11:31:46.633575 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b3febd1d-c011-4b2a-93bf-4ab2635ec127-kube-api-access-clvq8" (OuterVolumeSpecName: "kube-api-access-clvq8") pod "b3febd1d-c011-4b2a-93bf-4ab2635ec127" (UID: "b3febd1d-c011-4b2a-93bf-4ab2635ec127"). InnerVolumeSpecName "kube-api-access-clvq8". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 11:31:46 crc kubenswrapper[4926]: I1122 11:31:46.681021 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b3febd1d-c011-4b2a-93bf-4ab2635ec127-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "b3febd1d-c011-4b2a-93bf-4ab2635ec127" (UID: "b3febd1d-c011-4b2a-93bf-4ab2635ec127"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 11:31:46 crc kubenswrapper[4926]: I1122 11:31:46.732113 4926 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b3febd1d-c011-4b2a-93bf-4ab2635ec127-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 22 11:31:46 crc kubenswrapper[4926]: I1122 11:31:46.732168 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-clvq8\" (UniqueName: \"kubernetes.io/projected/b3febd1d-c011-4b2a-93bf-4ab2635ec127-kube-api-access-clvq8\") on node \"crc\" DevicePath \"\"" Nov 22 11:31:46 crc kubenswrapper[4926]: I1122 11:31:46.959782 4926 generic.go:334] "Generic (PLEG): container finished" podID="b3febd1d-c011-4b2a-93bf-4ab2635ec127" containerID="d479cf2ecf43fbd4be51f2a591f096c8075f7058bd84ce365970e84d1fbbc9aa" exitCode=0 Nov 22 11:31:46 crc kubenswrapper[4926]: I1122 11:31:46.959844 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-7f59b" Nov 22 11:31:46 crc kubenswrapper[4926]: I1122 11:31:46.959867 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-7f59b" event={"ID":"b3febd1d-c011-4b2a-93bf-4ab2635ec127","Type":"ContainerDied","Data":"d479cf2ecf43fbd4be51f2a591f096c8075f7058bd84ce365970e84d1fbbc9aa"} Nov 22 11:31:46 crc kubenswrapper[4926]: I1122 11:31:46.959948 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-7f59b" event={"ID":"b3febd1d-c011-4b2a-93bf-4ab2635ec127","Type":"ContainerDied","Data":"d6f3ee2a16add1d5f8c88a031484549cdc9af958db709033c402221ca78e3d6a"} Nov 22 11:31:46 crc kubenswrapper[4926]: I1122 11:31:46.959983 4926 scope.go:117] "RemoveContainer" containerID="d479cf2ecf43fbd4be51f2a591f096c8075f7058bd84ce365970e84d1fbbc9aa" Nov 22 11:31:46 crc kubenswrapper[4926]: I1122 11:31:46.988721 4926 scope.go:117] "RemoveContainer" containerID="2eb0b3c23a5a310173207a81880d9c814f6410015db1150e05a3e38f512506ce" Nov 22 11:31:47 crc kubenswrapper[4926]: I1122 11:31:47.012752 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-7f59b"] Nov 22 11:31:47 crc kubenswrapper[4926]: I1122 11:31:47.025546 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-7f59b"] Nov 22 11:31:47 crc kubenswrapper[4926]: I1122 11:31:47.033827 4926 scope.go:117] "RemoveContainer" containerID="693345290afbb593fe95adfbfaf37e4a219776063acc14ae519bd02d99de7000" Nov 22 11:31:47 crc kubenswrapper[4926]: I1122 11:31:47.093056 4926 scope.go:117] "RemoveContainer" containerID="d479cf2ecf43fbd4be51f2a591f096c8075f7058bd84ce365970e84d1fbbc9aa" Nov 22 11:31:47 crc kubenswrapper[4926]: E1122 11:31:47.093682 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d479cf2ecf43fbd4be51f2a591f096c8075f7058bd84ce365970e84d1fbbc9aa\": container with ID starting with d479cf2ecf43fbd4be51f2a591f096c8075f7058bd84ce365970e84d1fbbc9aa not found: ID does not exist" containerID="d479cf2ecf43fbd4be51f2a591f096c8075f7058bd84ce365970e84d1fbbc9aa" Nov 22 11:31:47 crc kubenswrapper[4926]: I1122 11:31:47.093715 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d479cf2ecf43fbd4be51f2a591f096c8075f7058bd84ce365970e84d1fbbc9aa"} err="failed to get container status 
\"d479cf2ecf43fbd4be51f2a591f096c8075f7058bd84ce365970e84d1fbbc9aa\": rpc error: code = NotFound desc = could not find container \"d479cf2ecf43fbd4be51f2a591f096c8075f7058bd84ce365970e84d1fbbc9aa\": container with ID starting with d479cf2ecf43fbd4be51f2a591f096c8075f7058bd84ce365970e84d1fbbc9aa not found: ID does not exist" Nov 22 11:31:47 crc kubenswrapper[4926]: I1122 11:31:47.093739 4926 scope.go:117] "RemoveContainer" containerID="2eb0b3c23a5a310173207a81880d9c814f6410015db1150e05a3e38f512506ce" Nov 22 11:31:47 crc kubenswrapper[4926]: E1122 11:31:47.094227 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2eb0b3c23a5a310173207a81880d9c814f6410015db1150e05a3e38f512506ce\": container with ID starting with 2eb0b3c23a5a310173207a81880d9c814f6410015db1150e05a3e38f512506ce not found: ID does not exist" containerID="2eb0b3c23a5a310173207a81880d9c814f6410015db1150e05a3e38f512506ce" Nov 22 11:31:47 crc kubenswrapper[4926]: I1122 11:31:47.094245 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2eb0b3c23a5a310173207a81880d9c814f6410015db1150e05a3e38f512506ce"} err="failed to get container status \"2eb0b3c23a5a310173207a81880d9c814f6410015db1150e05a3e38f512506ce\": rpc error: code = NotFound desc = could not find container \"2eb0b3c23a5a310173207a81880d9c814f6410015db1150e05a3e38f512506ce\": container with ID starting with 2eb0b3c23a5a310173207a81880d9c814f6410015db1150e05a3e38f512506ce not found: ID does not exist" Nov 22 11:31:47 crc kubenswrapper[4926]: I1122 11:31:47.094257 4926 scope.go:117] "RemoveContainer" containerID="693345290afbb593fe95adfbfaf37e4a219776063acc14ae519bd02d99de7000" Nov 22 11:31:47 crc kubenswrapper[4926]: E1122 11:31:47.094635 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"693345290afbb593fe95adfbfaf37e4a219776063acc14ae519bd02d99de7000\": container with ID starting with 693345290afbb593fe95adfbfaf37e4a219776063acc14ae519bd02d99de7000 not found: ID does not exist" containerID="693345290afbb593fe95adfbfaf37e4a219776063acc14ae519bd02d99de7000" Nov 22 11:31:47 crc kubenswrapper[4926]: I1122 11:31:47.094652 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"693345290afbb593fe95adfbfaf37e4a219776063acc14ae519bd02d99de7000"} err="failed to get container status \"693345290afbb593fe95adfbfaf37e4a219776063acc14ae519bd02d99de7000\": rpc error: code = NotFound desc = could not find container \"693345290afbb593fe95adfbfaf37e4a219776063acc14ae519bd02d99de7000\": container with ID starting with 693345290afbb593fe95adfbfaf37e4a219776063acc14ae519bd02d99de7000 not found: ID does not exist" Nov 22 11:31:48 crc kubenswrapper[4926]: I1122 11:31:48.593070 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b3febd1d-c011-4b2a-93bf-4ab2635ec127" path="/var/lib/kubelet/pods/b3febd1d-c011-4b2a-93bf-4ab2635ec127/volumes" Nov 22 11:31:52 crc kubenswrapper[4926]: I1122 11:31:52.070581 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-7jx9m" Nov 22 11:31:52 crc kubenswrapper[4926]: I1122 11:31:52.129749 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-7jx9m" Nov 22 11:31:52 crc kubenswrapper[4926]: I1122 11:31:52.316238 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" 
pods=["openshift-marketplace/redhat-operators-7jx9m"] Nov 22 11:31:52 crc kubenswrapper[4926]: I1122 11:31:52.581848 4926 scope.go:117] "RemoveContainer" containerID="a2b8252229cd7206a620d1ff730f422361fddf9e56f7c2a21e9e2dbcd158d5c6" Nov 22 11:31:52 crc kubenswrapper[4926]: E1122 11:31:52.582383 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xr9nd_openshift-machine-config-operator(d4977b14-85c3-4141-9b15-1768f09e8d27)\"" pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" podUID="d4977b14-85c3-4141-9b15-1768f09e8d27" Nov 22 11:31:54 crc kubenswrapper[4926]: I1122 11:31:54.034981 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-7jx9m" podUID="48f1ec9f-6654-4554-92a1-616623822964" containerName="registry-server" containerID="cri-o://fc96ab22353aba46d0d088ed481bf77876ac3025d17ac427ffc5e8a2e78c25f7" gracePeriod=2 Nov 22 11:31:54 crc kubenswrapper[4926]: I1122 11:31:54.512730 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-7jx9m" Nov 22 11:31:54 crc kubenswrapper[4926]: I1122 11:31:54.605991 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/48f1ec9f-6654-4554-92a1-616623822964-catalog-content\") pod \"48f1ec9f-6654-4554-92a1-616623822964\" (UID: \"48f1ec9f-6654-4554-92a1-616623822964\") " Nov 22 11:31:54 crc kubenswrapper[4926]: I1122 11:31:54.606055 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-g9bmc\" (UniqueName: \"kubernetes.io/projected/48f1ec9f-6654-4554-92a1-616623822964-kube-api-access-g9bmc\") pod \"48f1ec9f-6654-4554-92a1-616623822964\" (UID: \"48f1ec9f-6654-4554-92a1-616623822964\") " Nov 22 11:31:54 crc kubenswrapper[4926]: I1122 11:31:54.606094 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/48f1ec9f-6654-4554-92a1-616623822964-utilities\") pod \"48f1ec9f-6654-4554-92a1-616623822964\" (UID: \"48f1ec9f-6654-4554-92a1-616623822964\") " Nov 22 11:31:54 crc kubenswrapper[4926]: I1122 11:31:54.607663 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/48f1ec9f-6654-4554-92a1-616623822964-utilities" (OuterVolumeSpecName: "utilities") pod "48f1ec9f-6654-4554-92a1-616623822964" (UID: "48f1ec9f-6654-4554-92a1-616623822964"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 11:31:54 crc kubenswrapper[4926]: I1122 11:31:54.615676 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/48f1ec9f-6654-4554-92a1-616623822964-kube-api-access-g9bmc" (OuterVolumeSpecName: "kube-api-access-g9bmc") pod "48f1ec9f-6654-4554-92a1-616623822964" (UID: "48f1ec9f-6654-4554-92a1-616623822964"). InnerVolumeSpecName "kube-api-access-g9bmc". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 11:31:54 crc kubenswrapper[4926]: I1122 11:31:54.708159 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-g9bmc\" (UniqueName: \"kubernetes.io/projected/48f1ec9f-6654-4554-92a1-616623822964-kube-api-access-g9bmc\") on node \"crc\" DevicePath \"\"" Nov 22 11:31:54 crc kubenswrapper[4926]: I1122 11:31:54.708433 4926 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/48f1ec9f-6654-4554-92a1-616623822964-utilities\") on node \"crc\" DevicePath \"\"" Nov 22 11:31:54 crc kubenswrapper[4926]: I1122 11:31:54.716105 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/48f1ec9f-6654-4554-92a1-616623822964-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "48f1ec9f-6654-4554-92a1-616623822964" (UID: "48f1ec9f-6654-4554-92a1-616623822964"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 11:31:54 crc kubenswrapper[4926]: I1122 11:31:54.809948 4926 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/48f1ec9f-6654-4554-92a1-616623822964-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 22 11:31:55 crc kubenswrapper[4926]: I1122 11:31:55.046169 4926 generic.go:334] "Generic (PLEG): container finished" podID="48f1ec9f-6654-4554-92a1-616623822964" containerID="fc96ab22353aba46d0d088ed481bf77876ac3025d17ac427ffc5e8a2e78c25f7" exitCode=0 Nov 22 11:31:55 crc kubenswrapper[4926]: I1122 11:31:55.046211 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-7jx9m" event={"ID":"48f1ec9f-6654-4554-92a1-616623822964","Type":"ContainerDied","Data":"fc96ab22353aba46d0d088ed481bf77876ac3025d17ac427ffc5e8a2e78c25f7"} Nov 22 11:31:55 crc kubenswrapper[4926]: I1122 11:31:55.046236 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-7jx9m" event={"ID":"48f1ec9f-6654-4554-92a1-616623822964","Type":"ContainerDied","Data":"e6f52f899553e5449042151d42ab2c8e357c501b2bbcd4ba0a36713348165fee"} Nov 22 11:31:55 crc kubenswrapper[4926]: I1122 11:31:55.046252 4926 scope.go:117] "RemoveContainer" containerID="fc96ab22353aba46d0d088ed481bf77876ac3025d17ac427ffc5e8a2e78c25f7" Nov 22 11:31:55 crc kubenswrapper[4926]: I1122 11:31:55.046305 4926 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-7jx9m" Nov 22 11:31:55 crc kubenswrapper[4926]: I1122 11:31:55.068227 4926 scope.go:117] "RemoveContainer" containerID="25c9c6bcca79b788c71eed3e4f97bdd405d70d63b7c559b9caf22fb4e37a6a9a" Nov 22 11:31:55 crc kubenswrapper[4926]: I1122 11:31:55.090494 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-7jx9m"] Nov 22 11:31:55 crc kubenswrapper[4926]: I1122 11:31:55.102315 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-7jx9m"] Nov 22 11:31:55 crc kubenswrapper[4926]: I1122 11:31:55.105849 4926 scope.go:117] "RemoveContainer" containerID="f2c608bf835749b95d3c6168fab8758cade7b27334c66b081887e318c1eb3231" Nov 22 11:31:55 crc kubenswrapper[4926]: I1122 11:31:55.141331 4926 scope.go:117] "RemoveContainer" containerID="fc96ab22353aba46d0d088ed481bf77876ac3025d17ac427ffc5e8a2e78c25f7" Nov 22 11:31:55 crc kubenswrapper[4926]: E1122 11:31:55.141793 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"fc96ab22353aba46d0d088ed481bf77876ac3025d17ac427ffc5e8a2e78c25f7\": container with ID starting with fc96ab22353aba46d0d088ed481bf77876ac3025d17ac427ffc5e8a2e78c25f7 not found: ID does not exist" containerID="fc96ab22353aba46d0d088ed481bf77876ac3025d17ac427ffc5e8a2e78c25f7" Nov 22 11:31:55 crc kubenswrapper[4926]: I1122 11:31:55.141832 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fc96ab22353aba46d0d088ed481bf77876ac3025d17ac427ffc5e8a2e78c25f7"} err="failed to get container status \"fc96ab22353aba46d0d088ed481bf77876ac3025d17ac427ffc5e8a2e78c25f7\": rpc error: code = NotFound desc = could not find container \"fc96ab22353aba46d0d088ed481bf77876ac3025d17ac427ffc5e8a2e78c25f7\": container with ID starting with fc96ab22353aba46d0d088ed481bf77876ac3025d17ac427ffc5e8a2e78c25f7 not found: ID does not exist" Nov 22 11:31:55 crc kubenswrapper[4926]: I1122 11:31:55.141858 4926 scope.go:117] "RemoveContainer" containerID="25c9c6bcca79b788c71eed3e4f97bdd405d70d63b7c559b9caf22fb4e37a6a9a" Nov 22 11:31:55 crc kubenswrapper[4926]: E1122 11:31:55.142194 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"25c9c6bcca79b788c71eed3e4f97bdd405d70d63b7c559b9caf22fb4e37a6a9a\": container with ID starting with 25c9c6bcca79b788c71eed3e4f97bdd405d70d63b7c559b9caf22fb4e37a6a9a not found: ID does not exist" containerID="25c9c6bcca79b788c71eed3e4f97bdd405d70d63b7c559b9caf22fb4e37a6a9a" Nov 22 11:31:55 crc kubenswrapper[4926]: I1122 11:31:55.142228 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"25c9c6bcca79b788c71eed3e4f97bdd405d70d63b7c559b9caf22fb4e37a6a9a"} err="failed to get container status \"25c9c6bcca79b788c71eed3e4f97bdd405d70d63b7c559b9caf22fb4e37a6a9a\": rpc error: code = NotFound desc = could not find container \"25c9c6bcca79b788c71eed3e4f97bdd405d70d63b7c559b9caf22fb4e37a6a9a\": container with ID starting with 25c9c6bcca79b788c71eed3e4f97bdd405d70d63b7c559b9caf22fb4e37a6a9a not found: ID does not exist" Nov 22 11:31:55 crc kubenswrapper[4926]: I1122 11:31:55.142248 4926 scope.go:117] "RemoveContainer" containerID="f2c608bf835749b95d3c6168fab8758cade7b27334c66b081887e318c1eb3231" Nov 22 11:31:55 crc kubenswrapper[4926]: E1122 11:31:55.142657 4926 log.go:32] "ContainerStatus from runtime service failed" 
err="rpc error: code = NotFound desc = could not find container \"f2c608bf835749b95d3c6168fab8758cade7b27334c66b081887e318c1eb3231\": container with ID starting with f2c608bf835749b95d3c6168fab8758cade7b27334c66b081887e318c1eb3231 not found: ID does not exist" containerID="f2c608bf835749b95d3c6168fab8758cade7b27334c66b081887e318c1eb3231" Nov 22 11:31:55 crc kubenswrapper[4926]: I1122 11:31:55.142689 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f2c608bf835749b95d3c6168fab8758cade7b27334c66b081887e318c1eb3231"} err="failed to get container status \"f2c608bf835749b95d3c6168fab8758cade7b27334c66b081887e318c1eb3231\": rpc error: code = NotFound desc = could not find container \"f2c608bf835749b95d3c6168fab8758cade7b27334c66b081887e318c1eb3231\": container with ID starting with f2c608bf835749b95d3c6168fab8758cade7b27334c66b081887e318c1eb3231 not found: ID does not exist" Nov 22 11:31:56 crc kubenswrapper[4926]: I1122 11:31:56.595359 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="48f1ec9f-6654-4554-92a1-616623822964" path="/var/lib/kubelet/pods/48f1ec9f-6654-4554-92a1-616623822964/volumes" Nov 22 11:32:06 crc kubenswrapper[4926]: I1122 11:32:06.582243 4926 scope.go:117] "RemoveContainer" containerID="a2b8252229cd7206a620d1ff730f422361fddf9e56f7c2a21e9e2dbcd158d5c6" Nov 22 11:32:06 crc kubenswrapper[4926]: E1122 11:32:06.583293 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xr9nd_openshift-machine-config-operator(d4977b14-85c3-4141-9b15-1768f09e8d27)\"" pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" podUID="d4977b14-85c3-4141-9b15-1768f09e8d27" Nov 22 11:32:19 crc kubenswrapper[4926]: I1122 11:32:19.582277 4926 scope.go:117] "RemoveContainer" containerID="a2b8252229cd7206a620d1ff730f422361fddf9e56f7c2a21e9e2dbcd158d5c6" Nov 22 11:32:19 crc kubenswrapper[4926]: E1122 11:32:19.583115 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xr9nd_openshift-machine-config-operator(d4977b14-85c3-4141-9b15-1768f09e8d27)\"" pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" podUID="d4977b14-85c3-4141-9b15-1768f09e8d27" Nov 22 11:32:31 crc kubenswrapper[4926]: I1122 11:32:31.582749 4926 scope.go:117] "RemoveContainer" containerID="a2b8252229cd7206a620d1ff730f422361fddf9e56f7c2a21e9e2dbcd158d5c6" Nov 22 11:32:31 crc kubenswrapper[4926]: E1122 11:32:31.583684 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xr9nd_openshift-machine-config-operator(d4977b14-85c3-4141-9b15-1768f09e8d27)\"" pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" podUID="d4977b14-85c3-4141-9b15-1768f09e8d27" Nov 22 11:32:46 crc kubenswrapper[4926]: I1122 11:32:46.582072 4926 scope.go:117] "RemoveContainer" containerID="a2b8252229cd7206a620d1ff730f422361fddf9e56f7c2a21e9e2dbcd158d5c6" Nov 22 11:32:46 crc kubenswrapper[4926]: E1122 11:32:46.583094 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for 
\"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xr9nd_openshift-machine-config-operator(d4977b14-85c3-4141-9b15-1768f09e8d27)\"" pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" podUID="d4977b14-85c3-4141-9b15-1768f09e8d27" Nov 22 11:33:01 crc kubenswrapper[4926]: I1122 11:33:01.582796 4926 scope.go:117] "RemoveContainer" containerID="a2b8252229cd7206a620d1ff730f422361fddf9e56f7c2a21e9e2dbcd158d5c6" Nov 22 11:33:01 crc kubenswrapper[4926]: E1122 11:33:01.583608 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xr9nd_openshift-machine-config-operator(d4977b14-85c3-4141-9b15-1768f09e8d27)\"" pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" podUID="d4977b14-85c3-4141-9b15-1768f09e8d27" Nov 22 11:33:13 crc kubenswrapper[4926]: I1122 11:33:13.582389 4926 scope.go:117] "RemoveContainer" containerID="a2b8252229cd7206a620d1ff730f422361fddf9e56f7c2a21e9e2dbcd158d5c6" Nov 22 11:33:13 crc kubenswrapper[4926]: E1122 11:33:13.583526 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xr9nd_openshift-machine-config-operator(d4977b14-85c3-4141-9b15-1768f09e8d27)\"" pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" podUID="d4977b14-85c3-4141-9b15-1768f09e8d27" Nov 22 11:33:26 crc kubenswrapper[4926]: I1122 11:33:26.582266 4926 scope.go:117] "RemoveContainer" containerID="a2b8252229cd7206a620d1ff730f422361fddf9e56f7c2a21e9e2dbcd158d5c6" Nov 22 11:33:26 crc kubenswrapper[4926]: E1122 11:33:26.583384 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xr9nd_openshift-machine-config-operator(d4977b14-85c3-4141-9b15-1768f09e8d27)\"" pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" podUID="d4977b14-85c3-4141-9b15-1768f09e8d27" Nov 22 11:33:41 crc kubenswrapper[4926]: I1122 11:33:41.582509 4926 scope.go:117] "RemoveContainer" containerID="a2b8252229cd7206a620d1ff730f422361fddf9e56f7c2a21e9e2dbcd158d5c6" Nov 22 11:33:41 crc kubenswrapper[4926]: E1122 11:33:41.583585 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xr9nd_openshift-machine-config-operator(d4977b14-85c3-4141-9b15-1768f09e8d27)\"" pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" podUID="d4977b14-85c3-4141-9b15-1768f09e8d27" Nov 22 11:33:54 crc kubenswrapper[4926]: I1122 11:33:54.582359 4926 scope.go:117] "RemoveContainer" containerID="a2b8252229cd7206a620d1ff730f422361fddf9e56f7c2a21e9e2dbcd158d5c6" Nov 22 11:33:54 crc kubenswrapper[4926]: E1122 11:33:54.583204 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-xr9nd_openshift-machine-config-operator(d4977b14-85c3-4141-9b15-1768f09e8d27)\"" pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" podUID="d4977b14-85c3-4141-9b15-1768f09e8d27" Nov 22 11:34:07 crc kubenswrapper[4926]: I1122 11:34:07.581707 4926 scope.go:117] "RemoveContainer" containerID="a2b8252229cd7206a620d1ff730f422361fddf9e56f7c2a21e9e2dbcd158d5c6" Nov 22 11:34:07 crc kubenswrapper[4926]: E1122 11:34:07.582577 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xr9nd_openshift-machine-config-operator(d4977b14-85c3-4141-9b15-1768f09e8d27)\"" pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" podUID="d4977b14-85c3-4141-9b15-1768f09e8d27" Nov 22 11:34:21 crc kubenswrapper[4926]: I1122 11:34:21.582065 4926 scope.go:117] "RemoveContainer" containerID="a2b8252229cd7206a620d1ff730f422361fddf9e56f7c2a21e9e2dbcd158d5c6" Nov 22 11:34:21 crc kubenswrapper[4926]: E1122 11:34:21.582964 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xr9nd_openshift-machine-config-operator(d4977b14-85c3-4141-9b15-1768f09e8d27)\"" pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" podUID="d4977b14-85c3-4141-9b15-1768f09e8d27" Nov 22 11:34:34 crc kubenswrapper[4926]: I1122 11:34:34.582621 4926 scope.go:117] "RemoveContainer" containerID="a2b8252229cd7206a620d1ff730f422361fddf9e56f7c2a21e9e2dbcd158d5c6" Nov 22 11:34:34 crc kubenswrapper[4926]: E1122 11:34:34.583824 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xr9nd_openshift-machine-config-operator(d4977b14-85c3-4141-9b15-1768f09e8d27)\"" pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" podUID="d4977b14-85c3-4141-9b15-1768f09e8d27" Nov 22 11:34:46 crc kubenswrapper[4926]: I1122 11:34:46.582408 4926 scope.go:117] "RemoveContainer" containerID="a2b8252229cd7206a620d1ff730f422361fddf9e56f7c2a21e9e2dbcd158d5c6" Nov 22 11:34:46 crc kubenswrapper[4926]: E1122 11:34:46.583258 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xr9nd_openshift-machine-config-operator(d4977b14-85c3-4141-9b15-1768f09e8d27)\"" pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" podUID="d4977b14-85c3-4141-9b15-1768f09e8d27" Nov 22 11:34:58 crc kubenswrapper[4926]: I1122 11:34:58.583199 4926 scope.go:117] "RemoveContainer" containerID="a2b8252229cd7206a620d1ff730f422361fddf9e56f7c2a21e9e2dbcd158d5c6" Nov 22 11:34:58 crc kubenswrapper[4926]: E1122 11:34:58.584514 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xr9nd_openshift-machine-config-operator(d4977b14-85c3-4141-9b15-1768f09e8d27)\"" pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" 
podUID="d4977b14-85c3-4141-9b15-1768f09e8d27" Nov 22 11:35:08 crc kubenswrapper[4926]: I1122 11:35:08.614848 4926 generic.go:334] "Generic (PLEG): container finished" podID="588c20c1-2673-4c55-9dc4-1e20448b5adb" containerID="f128bd51f06034ab7c02bb879e8056f67176448885cac2b8bc9be76ce791d8b8" exitCode=0 Nov 22 11:35:08 crc kubenswrapper[4926]: I1122 11:35:08.614956 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/tempest-tests-tempest" event={"ID":"588c20c1-2673-4c55-9dc4-1e20448b5adb","Type":"ContainerDied","Data":"f128bd51f06034ab7c02bb879e8056f67176448885cac2b8bc9be76ce791d8b8"} Nov 22 11:35:10 crc kubenswrapper[4926]: I1122 11:35:10.051601 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/tempest-tests-tempest" Nov 22 11:35:10 crc kubenswrapper[4926]: I1122 11:35:10.217086 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/588c20c1-2673-4c55-9dc4-1e20448b5adb-openstack-config-secret\") pod \"588c20c1-2673-4c55-9dc4-1e20448b5adb\" (UID: \"588c20c1-2673-4c55-9dc4-1e20448b5adb\") " Nov 22 11:35:10 crc kubenswrapper[4926]: I1122 11:35:10.217151 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/588c20c1-2673-4c55-9dc4-1e20448b5adb-openstack-config\") pod \"588c20c1-2673-4c55-9dc4-1e20448b5adb\" (UID: \"588c20c1-2673-4c55-9dc4-1e20448b5adb\") " Nov 22 11:35:10 crc kubenswrapper[4926]: I1122 11:35:10.217227 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/588c20c1-2673-4c55-9dc4-1e20448b5adb-ssh-key\") pod \"588c20c1-2673-4c55-9dc4-1e20448b5adb\" (UID: \"588c20c1-2673-4c55-9dc4-1e20448b5adb\") " Nov 22 11:35:10 crc kubenswrapper[4926]: I1122 11:35:10.217340 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ca-certs\" (UniqueName: \"kubernetes.io/secret/588c20c1-2673-4c55-9dc4-1e20448b5adb-ca-certs\") pod \"588c20c1-2673-4c55-9dc4-1e20448b5adb\" (UID: \"588c20c1-2673-4c55-9dc4-1e20448b5adb\") " Nov 22 11:35:10 crc kubenswrapper[4926]: I1122 11:35:10.217379 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6f89g\" (UniqueName: \"kubernetes.io/projected/588c20c1-2673-4c55-9dc4-1e20448b5adb-kube-api-access-6f89g\") pod \"588c20c1-2673-4c55-9dc4-1e20448b5adb\" (UID: \"588c20c1-2673-4c55-9dc4-1e20448b5adb\") " Nov 22 11:35:10 crc kubenswrapper[4926]: I1122 11:35:10.217466 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/588c20c1-2673-4c55-9dc4-1e20448b5adb-config-data\") pod \"588c20c1-2673-4c55-9dc4-1e20448b5adb\" (UID: \"588c20c1-2673-4c55-9dc4-1e20448b5adb\") " Nov 22 11:35:10 crc kubenswrapper[4926]: I1122 11:35:10.217508 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"test-operator-logs\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"588c20c1-2673-4c55-9dc4-1e20448b5adb\" (UID: \"588c20c1-2673-4c55-9dc4-1e20448b5adb\") " Nov 22 11:35:10 crc kubenswrapper[4926]: I1122 11:35:10.217716 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"test-operator-ephemeral-temporary\" (UniqueName: \"kubernetes.io/empty-dir/588c20c1-2673-4c55-9dc4-1e20448b5adb-test-operator-ephemeral-temporary\") pod 
\"588c20c1-2673-4c55-9dc4-1e20448b5adb\" (UID: \"588c20c1-2673-4c55-9dc4-1e20448b5adb\") " Nov 22 11:35:10 crc kubenswrapper[4926]: I1122 11:35:10.217851 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"test-operator-ephemeral-workdir\" (UniqueName: \"kubernetes.io/empty-dir/588c20c1-2673-4c55-9dc4-1e20448b5adb-test-operator-ephemeral-workdir\") pod \"588c20c1-2673-4c55-9dc4-1e20448b5adb\" (UID: \"588c20c1-2673-4c55-9dc4-1e20448b5adb\") " Nov 22 11:35:10 crc kubenswrapper[4926]: I1122 11:35:10.219502 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/588c20c1-2673-4c55-9dc4-1e20448b5adb-test-operator-ephemeral-temporary" (OuterVolumeSpecName: "test-operator-ephemeral-temporary") pod "588c20c1-2673-4c55-9dc4-1e20448b5adb" (UID: "588c20c1-2673-4c55-9dc4-1e20448b5adb"). InnerVolumeSpecName "test-operator-ephemeral-temporary". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 11:35:10 crc kubenswrapper[4926]: I1122 11:35:10.220497 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/588c20c1-2673-4c55-9dc4-1e20448b5adb-config-data" (OuterVolumeSpecName: "config-data") pod "588c20c1-2673-4c55-9dc4-1e20448b5adb" (UID: "588c20c1-2673-4c55-9dc4-1e20448b5adb"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 11:35:10 crc kubenswrapper[4926]: I1122 11:35:10.224324 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage06-crc" (OuterVolumeSpecName: "test-operator-logs") pod "588c20c1-2673-4c55-9dc4-1e20448b5adb" (UID: "588c20c1-2673-4c55-9dc4-1e20448b5adb"). InnerVolumeSpecName "local-storage06-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 22 11:35:10 crc kubenswrapper[4926]: I1122 11:35:10.225139 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/588c20c1-2673-4c55-9dc4-1e20448b5adb-test-operator-ephemeral-workdir" (OuterVolumeSpecName: "test-operator-ephemeral-workdir") pod "588c20c1-2673-4c55-9dc4-1e20448b5adb" (UID: "588c20c1-2673-4c55-9dc4-1e20448b5adb"). InnerVolumeSpecName "test-operator-ephemeral-workdir". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 11:35:10 crc kubenswrapper[4926]: I1122 11:35:10.226473 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/588c20c1-2673-4c55-9dc4-1e20448b5adb-kube-api-access-6f89g" (OuterVolumeSpecName: "kube-api-access-6f89g") pod "588c20c1-2673-4c55-9dc4-1e20448b5adb" (UID: "588c20c1-2673-4c55-9dc4-1e20448b5adb"). InnerVolumeSpecName "kube-api-access-6f89g". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 11:35:10 crc kubenswrapper[4926]: I1122 11:35:10.253926 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/588c20c1-2673-4c55-9dc4-1e20448b5adb-openstack-config-secret" (OuterVolumeSpecName: "openstack-config-secret") pod "588c20c1-2673-4c55-9dc4-1e20448b5adb" (UID: "588c20c1-2673-4c55-9dc4-1e20448b5adb"). InnerVolumeSpecName "openstack-config-secret". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 11:35:10 crc kubenswrapper[4926]: I1122 11:35:10.259700 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/588c20c1-2673-4c55-9dc4-1e20448b5adb-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "588c20c1-2673-4c55-9dc4-1e20448b5adb" (UID: "588c20c1-2673-4c55-9dc4-1e20448b5adb"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 11:35:10 crc kubenswrapper[4926]: I1122 11:35:10.272778 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/588c20c1-2673-4c55-9dc4-1e20448b5adb-ca-certs" (OuterVolumeSpecName: "ca-certs") pod "588c20c1-2673-4c55-9dc4-1e20448b5adb" (UID: "588c20c1-2673-4c55-9dc4-1e20448b5adb"). InnerVolumeSpecName "ca-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 11:35:10 crc kubenswrapper[4926]: I1122 11:35:10.278176 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/588c20c1-2673-4c55-9dc4-1e20448b5adb-openstack-config" (OuterVolumeSpecName: "openstack-config") pod "588c20c1-2673-4c55-9dc4-1e20448b5adb" (UID: "588c20c1-2673-4c55-9dc4-1e20448b5adb"). InnerVolumeSpecName "openstack-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 11:35:10 crc kubenswrapper[4926]: I1122 11:35:10.320539 4926 reconciler_common.go:293] "Volume detached for volume \"test-operator-ephemeral-workdir\" (UniqueName: \"kubernetes.io/empty-dir/588c20c1-2673-4c55-9dc4-1e20448b5adb-test-operator-ephemeral-workdir\") on node \"crc\" DevicePath \"\"" Nov 22 11:35:10 crc kubenswrapper[4926]: I1122 11:35:10.320581 4926 reconciler_common.go:293] "Volume detached for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/588c20c1-2673-4c55-9dc4-1e20448b5adb-openstack-config-secret\") on node \"crc\" DevicePath \"\"" Nov 22 11:35:10 crc kubenswrapper[4926]: I1122 11:35:10.320596 4926 reconciler_common.go:293] "Volume detached for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/588c20c1-2673-4c55-9dc4-1e20448b5adb-openstack-config\") on node \"crc\" DevicePath \"\"" Nov 22 11:35:10 crc kubenswrapper[4926]: I1122 11:35:10.320607 4926 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/588c20c1-2673-4c55-9dc4-1e20448b5adb-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 22 11:35:10 crc kubenswrapper[4926]: I1122 11:35:10.320619 4926 reconciler_common.go:293] "Volume detached for volume \"ca-certs\" (UniqueName: \"kubernetes.io/secret/588c20c1-2673-4c55-9dc4-1e20448b5adb-ca-certs\") on node \"crc\" DevicePath \"\"" Nov 22 11:35:10 crc kubenswrapper[4926]: I1122 11:35:10.320632 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6f89g\" (UniqueName: \"kubernetes.io/projected/588c20c1-2673-4c55-9dc4-1e20448b5adb-kube-api-access-6f89g\") on node \"crc\" DevicePath \"\"" Nov 22 11:35:10 crc kubenswrapper[4926]: I1122 11:35:10.320643 4926 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/588c20c1-2673-4c55-9dc4-1e20448b5adb-config-data\") on node \"crc\" DevicePath \"\"" Nov 22 11:35:10 crc kubenswrapper[4926]: I1122 11:35:10.320684 4926 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") on node \"crc\" " Nov 22 11:35:10 crc kubenswrapper[4926]: I1122 11:35:10.320697 
4926 reconciler_common.go:293] "Volume detached for volume \"test-operator-ephemeral-temporary\" (UniqueName: \"kubernetes.io/empty-dir/588c20c1-2673-4c55-9dc4-1e20448b5adb-test-operator-ephemeral-temporary\") on node \"crc\" DevicePath \"\"" Nov 22 11:35:10 crc kubenswrapper[4926]: I1122 11:35:10.341419 4926 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage06-crc" (UniqueName: "kubernetes.io/local-volume/local-storage06-crc") on node "crc" Nov 22 11:35:10 crc kubenswrapper[4926]: I1122 11:35:10.422605 4926 reconciler_common.go:293] "Volume detached for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") on node \"crc\" DevicePath \"\"" Nov 22 11:35:10 crc kubenswrapper[4926]: I1122 11:35:10.640120 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/tempest-tests-tempest" event={"ID":"588c20c1-2673-4c55-9dc4-1e20448b5adb","Type":"ContainerDied","Data":"0a54df007a942324957a8d3fa74d0f721814635335a55a9a7671410fb2957eb6"} Nov 22 11:35:10 crc kubenswrapper[4926]: I1122 11:35:10.640358 4926 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="0a54df007a942324957a8d3fa74d0f721814635335a55a9a7671410fb2957eb6" Nov 22 11:35:10 crc kubenswrapper[4926]: I1122 11:35:10.640222 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/tempest-tests-tempest" Nov 22 11:35:11 crc kubenswrapper[4926]: I1122 11:35:11.583145 4926 scope.go:117] "RemoveContainer" containerID="a2b8252229cd7206a620d1ff730f422361fddf9e56f7c2a21e9e2dbcd158d5c6" Nov 22 11:35:12 crc kubenswrapper[4926]: I1122 11:35:12.664550 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" event={"ID":"d4977b14-85c3-4141-9b15-1768f09e8d27","Type":"ContainerStarted","Data":"9d6e4aef945d9e08737522f4731c0e4f60636bb537365c94ec1ad314c9a6e50e"} Nov 22 11:35:13 crc kubenswrapper[4926]: I1122 11:35:13.822489 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/test-operator-logs-pod-tempest-tempest-tests-tempest"] Nov 22 11:35:13 crc kubenswrapper[4926]: E1122 11:35:13.823431 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6f769723-ecc6-4cb7-821f-977c9d3f4dfb" containerName="registry-server" Nov 22 11:35:13 crc kubenswrapper[4926]: I1122 11:35:13.823449 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="6f769723-ecc6-4cb7-821f-977c9d3f4dfb" containerName="registry-server" Nov 22 11:35:13 crc kubenswrapper[4926]: E1122 11:35:13.823467 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b3febd1d-c011-4b2a-93bf-4ab2635ec127" containerName="extract-utilities" Nov 22 11:35:13 crc kubenswrapper[4926]: I1122 11:35:13.823475 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="b3febd1d-c011-4b2a-93bf-4ab2635ec127" containerName="extract-utilities" Nov 22 11:35:13 crc kubenswrapper[4926]: E1122 11:35:13.823505 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b3febd1d-c011-4b2a-93bf-4ab2635ec127" containerName="registry-server" Nov 22 11:35:13 crc kubenswrapper[4926]: I1122 11:35:13.823513 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="b3febd1d-c011-4b2a-93bf-4ab2635ec127" containerName="registry-server" Nov 22 11:35:13 crc kubenswrapper[4926]: E1122 11:35:13.823536 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="588c20c1-2673-4c55-9dc4-1e20448b5adb" containerName="tempest-tests-tempest-tests-runner" Nov 22 
11:35:13 crc kubenswrapper[4926]: I1122 11:35:13.823544 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="588c20c1-2673-4c55-9dc4-1e20448b5adb" containerName="tempest-tests-tempest-tests-runner" Nov 22 11:35:13 crc kubenswrapper[4926]: E1122 11:35:13.823562 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b3febd1d-c011-4b2a-93bf-4ab2635ec127" containerName="extract-content" Nov 22 11:35:13 crc kubenswrapper[4926]: I1122 11:35:13.823569 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="b3febd1d-c011-4b2a-93bf-4ab2635ec127" containerName="extract-content" Nov 22 11:35:13 crc kubenswrapper[4926]: E1122 11:35:13.823582 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="48f1ec9f-6654-4554-92a1-616623822964" containerName="extract-content" Nov 22 11:35:13 crc kubenswrapper[4926]: I1122 11:35:13.823589 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="48f1ec9f-6654-4554-92a1-616623822964" containerName="extract-content" Nov 22 11:35:13 crc kubenswrapper[4926]: E1122 11:35:13.823603 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6f769723-ecc6-4cb7-821f-977c9d3f4dfb" containerName="extract-utilities" Nov 22 11:35:13 crc kubenswrapper[4926]: I1122 11:35:13.823610 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="6f769723-ecc6-4cb7-821f-977c9d3f4dfb" containerName="extract-utilities" Nov 22 11:35:13 crc kubenswrapper[4926]: E1122 11:35:13.823627 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="48f1ec9f-6654-4554-92a1-616623822964" containerName="registry-server" Nov 22 11:35:13 crc kubenswrapper[4926]: I1122 11:35:13.823634 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="48f1ec9f-6654-4554-92a1-616623822964" containerName="registry-server" Nov 22 11:35:13 crc kubenswrapper[4926]: E1122 11:35:13.823645 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="48f1ec9f-6654-4554-92a1-616623822964" containerName="extract-utilities" Nov 22 11:35:13 crc kubenswrapper[4926]: I1122 11:35:13.823651 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="48f1ec9f-6654-4554-92a1-616623822964" containerName="extract-utilities" Nov 22 11:35:13 crc kubenswrapper[4926]: E1122 11:35:13.823665 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6f769723-ecc6-4cb7-821f-977c9d3f4dfb" containerName="extract-content" Nov 22 11:35:13 crc kubenswrapper[4926]: I1122 11:35:13.823672 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="6f769723-ecc6-4cb7-821f-977c9d3f4dfb" containerName="extract-content" Nov 22 11:35:13 crc kubenswrapper[4926]: I1122 11:35:13.823860 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="6f769723-ecc6-4cb7-821f-977c9d3f4dfb" containerName="registry-server" Nov 22 11:35:13 crc kubenswrapper[4926]: I1122 11:35:13.824957 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="b3febd1d-c011-4b2a-93bf-4ab2635ec127" containerName="registry-server" Nov 22 11:35:13 crc kubenswrapper[4926]: I1122 11:35:13.824974 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="48f1ec9f-6654-4554-92a1-616623822964" containerName="registry-server" Nov 22 11:35:13 crc kubenswrapper[4926]: I1122 11:35:13.825014 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="588c20c1-2673-4c55-9dc4-1e20448b5adb" containerName="tempest-tests-tempest-tests-runner" Nov 22 11:35:13 crc kubenswrapper[4926]: I1122 11:35:13.826165 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" Nov 22 11:35:13 crc kubenswrapper[4926]: I1122 11:35:13.828223 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"default-dockercfg-pfcj4" Nov 22 11:35:13 crc kubenswrapper[4926]: I1122 11:35:13.837750 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/test-operator-logs-pod-tempest-tempest-tests-tempest"] Nov 22 11:35:13 crc kubenswrapper[4926]: I1122 11:35:13.996913 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"2afc1e0b-105d-4e75-b966-4a8bdff5f07f\") " pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" Nov 22 11:35:13 crc kubenswrapper[4926]: I1122 11:35:13.997088 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qhjd6\" (UniqueName: \"kubernetes.io/projected/2afc1e0b-105d-4e75-b966-4a8bdff5f07f-kube-api-access-qhjd6\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"2afc1e0b-105d-4e75-b966-4a8bdff5f07f\") " pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" Nov 22 11:35:14 crc kubenswrapper[4926]: I1122 11:35:14.098741 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qhjd6\" (UniqueName: \"kubernetes.io/projected/2afc1e0b-105d-4e75-b966-4a8bdff5f07f-kube-api-access-qhjd6\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"2afc1e0b-105d-4e75-b966-4a8bdff5f07f\") " pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" Nov 22 11:35:14 crc kubenswrapper[4926]: I1122 11:35:14.098787 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"2afc1e0b-105d-4e75-b966-4a8bdff5f07f\") " pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" Nov 22 11:35:14 crc kubenswrapper[4926]: I1122 11:35:14.099479 4926 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"2afc1e0b-105d-4e75-b966-4a8bdff5f07f\") device mount path \"/mnt/openstack/pv06\"" pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" Nov 22 11:35:14 crc kubenswrapper[4926]: I1122 11:35:14.123006 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qhjd6\" (UniqueName: \"kubernetes.io/projected/2afc1e0b-105d-4e75-b966-4a8bdff5f07f-kube-api-access-qhjd6\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"2afc1e0b-105d-4e75-b966-4a8bdff5f07f\") " pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" Nov 22 11:35:14 crc kubenswrapper[4926]: I1122 11:35:14.125945 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"2afc1e0b-105d-4e75-b966-4a8bdff5f07f\") " pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" Nov 22 11:35:14 crc 
kubenswrapper[4926]: I1122 11:35:14.159438 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" Nov 22 11:35:14 crc kubenswrapper[4926]: I1122 11:35:14.628908 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/test-operator-logs-pod-tempest-tempest-tests-tempest"] Nov 22 11:35:14 crc kubenswrapper[4926]: I1122 11:35:14.697122 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" event={"ID":"2afc1e0b-105d-4e75-b966-4a8bdff5f07f","Type":"ContainerStarted","Data":"8ce051bc7c58da9e7775556cbcc5477b59d54ad7e9f1b2204f2f1e9759c7fa84"} Nov 22 11:35:16 crc kubenswrapper[4926]: I1122 11:35:16.729620 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" event={"ID":"2afc1e0b-105d-4e75-b966-4a8bdff5f07f","Type":"ContainerStarted","Data":"62dd56eda04df6759149de47837e9d240acd5ae18e68cb0383436fafd8b491d6"} Nov 22 11:35:16 crc kubenswrapper[4926]: I1122 11:35:16.746737 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" podStartSLOduration=2.802242145 podStartE2EDuration="3.746695574s" podCreationTimestamp="2025-11-22 11:35:13 +0000 UTC" firstStartedPulling="2025-11-22 11:35:14.632623858 +0000 UTC m=+3334.934229145" lastFinishedPulling="2025-11-22 11:35:15.577077277 +0000 UTC m=+3335.878682574" observedRunningTime="2025-11-22 11:35:16.745323764 +0000 UTC m=+3337.046929051" watchObservedRunningTime="2025-11-22 11:35:16.746695574 +0000 UTC m=+3337.048300871" Nov 22 11:35:39 crc kubenswrapper[4926]: I1122 11:35:39.605519 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-zcnnb/must-gather-qm8bc"] Nov 22 11:35:39 crc kubenswrapper[4926]: I1122 11:35:39.607824 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-zcnnb/must-gather-qm8bc" Nov 22 11:35:39 crc kubenswrapper[4926]: I1122 11:35:39.612805 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-must-gather-zcnnb"/"openshift-service-ca.crt" Nov 22 11:35:39 crc kubenswrapper[4926]: I1122 11:35:39.613439 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-must-gather-zcnnb"/"kube-root-ca.crt" Nov 22 11:35:39 crc kubenswrapper[4926]: I1122 11:35:39.613974 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-must-gather-zcnnb"/"default-dockercfg-c9cgc" Nov 22 11:35:39 crc kubenswrapper[4926]: I1122 11:35:39.625440 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-must-gather-zcnnb/must-gather-qm8bc"] Nov 22 11:35:39 crc kubenswrapper[4926]: I1122 11:35:39.721852 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/cf33381f-80dd-4c3b-988f-982a1b420fb1-must-gather-output\") pod \"must-gather-qm8bc\" (UID: \"cf33381f-80dd-4c3b-988f-982a1b420fb1\") " pod="openshift-must-gather-zcnnb/must-gather-qm8bc" Nov 22 11:35:39 crc kubenswrapper[4926]: I1122 11:35:39.722347 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jlkl8\" (UniqueName: \"kubernetes.io/projected/cf33381f-80dd-4c3b-988f-982a1b420fb1-kube-api-access-jlkl8\") pod \"must-gather-qm8bc\" (UID: \"cf33381f-80dd-4c3b-988f-982a1b420fb1\") " pod="openshift-must-gather-zcnnb/must-gather-qm8bc" Nov 22 11:35:39 crc kubenswrapper[4926]: I1122 11:35:39.824082 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/cf33381f-80dd-4c3b-988f-982a1b420fb1-must-gather-output\") pod \"must-gather-qm8bc\" (UID: \"cf33381f-80dd-4c3b-988f-982a1b420fb1\") " pod="openshift-must-gather-zcnnb/must-gather-qm8bc" Nov 22 11:35:39 crc kubenswrapper[4926]: I1122 11:35:39.824778 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jlkl8\" (UniqueName: \"kubernetes.io/projected/cf33381f-80dd-4c3b-988f-982a1b420fb1-kube-api-access-jlkl8\") pod \"must-gather-qm8bc\" (UID: \"cf33381f-80dd-4c3b-988f-982a1b420fb1\") " pod="openshift-must-gather-zcnnb/must-gather-qm8bc" Nov 22 11:35:39 crc kubenswrapper[4926]: I1122 11:35:39.825144 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/cf33381f-80dd-4c3b-988f-982a1b420fb1-must-gather-output\") pod \"must-gather-qm8bc\" (UID: \"cf33381f-80dd-4c3b-988f-982a1b420fb1\") " pod="openshift-must-gather-zcnnb/must-gather-qm8bc" Nov 22 11:35:39 crc kubenswrapper[4926]: I1122 11:35:39.843587 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jlkl8\" (UniqueName: \"kubernetes.io/projected/cf33381f-80dd-4c3b-988f-982a1b420fb1-kube-api-access-jlkl8\") pod \"must-gather-qm8bc\" (UID: \"cf33381f-80dd-4c3b-988f-982a1b420fb1\") " pod="openshift-must-gather-zcnnb/must-gather-qm8bc" Nov 22 11:35:39 crc kubenswrapper[4926]: I1122 11:35:39.926250 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-zcnnb/must-gather-qm8bc" Nov 22 11:35:40 crc kubenswrapper[4926]: I1122 11:35:40.222210 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-must-gather-zcnnb/must-gather-qm8bc"] Nov 22 11:35:40 crc kubenswrapper[4926]: W1122 11:35:40.227294 4926 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podcf33381f_80dd_4c3b_988f_982a1b420fb1.slice/crio-2427957d97c3291e61a42943d1bcf436f2eb0fc14002963ba847634d478c3d61 WatchSource:0}: Error finding container 2427957d97c3291e61a42943d1bcf436f2eb0fc14002963ba847634d478c3d61: Status 404 returned error can't find the container with id 2427957d97c3291e61a42943d1bcf436f2eb0fc14002963ba847634d478c3d61 Nov 22 11:35:40 crc kubenswrapper[4926]: I1122 11:35:40.964628 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-zcnnb/must-gather-qm8bc" event={"ID":"cf33381f-80dd-4c3b-988f-982a1b420fb1","Type":"ContainerStarted","Data":"2427957d97c3291e61a42943d1bcf436f2eb0fc14002963ba847634d478c3d61"} Nov 22 11:35:47 crc kubenswrapper[4926]: I1122 11:35:47.034607 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-zcnnb/must-gather-qm8bc" event={"ID":"cf33381f-80dd-4c3b-988f-982a1b420fb1","Type":"ContainerStarted","Data":"2a8fe4ed84722d5ad60ff453a695f2b44f4f924ebd2473a49dee989aad9ab1f7"} Nov 22 11:35:48 crc kubenswrapper[4926]: I1122 11:35:48.050984 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-zcnnb/must-gather-qm8bc" event={"ID":"cf33381f-80dd-4c3b-988f-982a1b420fb1","Type":"ContainerStarted","Data":"35c58ac64c17bbc1c4353719738d0a05f4f00adeacd99faa9552ca67a72ed097"} Nov 22 11:35:48 crc kubenswrapper[4926]: I1122 11:35:48.068726 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-must-gather-zcnnb/must-gather-qm8bc" podStartSLOduration=2.7576240690000002 podStartE2EDuration="9.068699572s" podCreationTimestamp="2025-11-22 11:35:39 +0000 UTC" firstStartedPulling="2025-11-22 11:35:40.229255432 +0000 UTC m=+3360.530860719" lastFinishedPulling="2025-11-22 11:35:46.540330925 +0000 UTC m=+3366.841936222" observedRunningTime="2025-11-22 11:35:48.066313373 +0000 UTC m=+3368.367918690" watchObservedRunningTime="2025-11-22 11:35:48.068699572 +0000 UTC m=+3368.370304889" Nov 22 11:35:50 crc kubenswrapper[4926]: E1122 11:35:50.895752 4926 upgradeaware.go:427] Error proxying data from client to backend: readfrom tcp 38.102.83.248:60892->38.102.83.248:35555: write tcp 38.102.83.248:60892->38.102.83.248:35555: write: broken pipe Nov 22 11:35:51 crc kubenswrapper[4926]: I1122 11:35:51.671589 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-zcnnb/crc-debug-fjbq8"] Nov 22 11:35:51 crc kubenswrapper[4926]: I1122 11:35:51.673274 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-zcnnb/crc-debug-fjbq8" Nov 22 11:35:51 crc kubenswrapper[4926]: I1122 11:35:51.711099 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/c7a3d06d-0e85-489a-b59f-b152275c0839-host\") pod \"crc-debug-fjbq8\" (UID: \"c7a3d06d-0e85-489a-b59f-b152275c0839\") " pod="openshift-must-gather-zcnnb/crc-debug-fjbq8" Nov 22 11:35:51 crc kubenswrapper[4926]: I1122 11:35:51.711167 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-82dcm\" (UniqueName: \"kubernetes.io/projected/c7a3d06d-0e85-489a-b59f-b152275c0839-kube-api-access-82dcm\") pod \"crc-debug-fjbq8\" (UID: \"c7a3d06d-0e85-489a-b59f-b152275c0839\") " pod="openshift-must-gather-zcnnb/crc-debug-fjbq8" Nov 22 11:35:51 crc kubenswrapper[4926]: I1122 11:35:51.812745 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/c7a3d06d-0e85-489a-b59f-b152275c0839-host\") pod \"crc-debug-fjbq8\" (UID: \"c7a3d06d-0e85-489a-b59f-b152275c0839\") " pod="openshift-must-gather-zcnnb/crc-debug-fjbq8" Nov 22 11:35:51 crc kubenswrapper[4926]: I1122 11:35:51.812794 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-82dcm\" (UniqueName: \"kubernetes.io/projected/c7a3d06d-0e85-489a-b59f-b152275c0839-kube-api-access-82dcm\") pod \"crc-debug-fjbq8\" (UID: \"c7a3d06d-0e85-489a-b59f-b152275c0839\") " pod="openshift-must-gather-zcnnb/crc-debug-fjbq8" Nov 22 11:35:51 crc kubenswrapper[4926]: I1122 11:35:51.812919 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/c7a3d06d-0e85-489a-b59f-b152275c0839-host\") pod \"crc-debug-fjbq8\" (UID: \"c7a3d06d-0e85-489a-b59f-b152275c0839\") " pod="openshift-must-gather-zcnnb/crc-debug-fjbq8" Nov 22 11:35:51 crc kubenswrapper[4926]: I1122 11:35:51.833908 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-82dcm\" (UniqueName: \"kubernetes.io/projected/c7a3d06d-0e85-489a-b59f-b152275c0839-kube-api-access-82dcm\") pod \"crc-debug-fjbq8\" (UID: \"c7a3d06d-0e85-489a-b59f-b152275c0839\") " pod="openshift-must-gather-zcnnb/crc-debug-fjbq8" Nov 22 11:35:52 crc kubenswrapper[4926]: I1122 11:35:52.006687 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-zcnnb/crc-debug-fjbq8" Nov 22 11:35:52 crc kubenswrapper[4926]: I1122 11:35:52.088719 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-zcnnb/crc-debug-fjbq8" event={"ID":"c7a3d06d-0e85-489a-b59f-b152275c0839","Type":"ContainerStarted","Data":"2266af621e16478bc4b14190407e54ef3e2a6cb9c2f782e984355ca04b1702e9"} Nov 22 11:36:03 crc kubenswrapper[4926]: I1122 11:36:03.184429 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-zcnnb/crc-debug-fjbq8" event={"ID":"c7a3d06d-0e85-489a-b59f-b152275c0839","Type":"ContainerStarted","Data":"8158ccc4425509d3212cf921e49aae9810eae49fd05ef2fa38e9e382663f5b9b"} Nov 22 11:36:03 crc kubenswrapper[4926]: I1122 11:36:03.203169 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-must-gather-zcnnb/crc-debug-fjbq8" podStartSLOduration=1.70979738 podStartE2EDuration="12.203151806s" podCreationTimestamp="2025-11-22 11:35:51 +0000 UTC" firstStartedPulling="2025-11-22 11:35:52.040940631 +0000 UTC m=+3372.342545918" lastFinishedPulling="2025-11-22 11:36:02.534295057 +0000 UTC m=+3382.835900344" observedRunningTime="2025-11-22 11:36:03.197901226 +0000 UTC m=+3383.499506513" watchObservedRunningTime="2025-11-22 11:36:03.203151806 +0000 UTC m=+3383.504757093" Nov 22 11:36:15 crc kubenswrapper[4926]: I1122 11:36:15.059878 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-pfnq9"] Nov 22 11:36:15 crc kubenswrapper[4926]: I1122 11:36:15.062606 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-pfnq9" Nov 22 11:36:15 crc kubenswrapper[4926]: I1122 11:36:15.066837 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-pfnq9"] Nov 22 11:36:15 crc kubenswrapper[4926]: I1122 11:36:15.166733 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gq6d5\" (UniqueName: \"kubernetes.io/projected/5839bc26-c210-445e-b93d-26685aac8332-kube-api-access-gq6d5\") pod \"certified-operators-pfnq9\" (UID: \"5839bc26-c210-445e-b93d-26685aac8332\") " pod="openshift-marketplace/certified-operators-pfnq9" Nov 22 11:36:15 crc kubenswrapper[4926]: I1122 11:36:15.166813 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5839bc26-c210-445e-b93d-26685aac8332-utilities\") pod \"certified-operators-pfnq9\" (UID: \"5839bc26-c210-445e-b93d-26685aac8332\") " pod="openshift-marketplace/certified-operators-pfnq9" Nov 22 11:36:15 crc kubenswrapper[4926]: I1122 11:36:15.166981 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5839bc26-c210-445e-b93d-26685aac8332-catalog-content\") pod \"certified-operators-pfnq9\" (UID: \"5839bc26-c210-445e-b93d-26685aac8332\") " pod="openshift-marketplace/certified-operators-pfnq9" Nov 22 11:36:15 crc kubenswrapper[4926]: I1122 11:36:15.269101 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5839bc26-c210-445e-b93d-26685aac8332-utilities\") pod \"certified-operators-pfnq9\" (UID: \"5839bc26-c210-445e-b93d-26685aac8332\") " pod="openshift-marketplace/certified-operators-pfnq9" Nov 22 11:36:15 crc 
kubenswrapper[4926]: I1122 11:36:15.269161 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5839bc26-c210-445e-b93d-26685aac8332-catalog-content\") pod \"certified-operators-pfnq9\" (UID: \"5839bc26-c210-445e-b93d-26685aac8332\") " pod="openshift-marketplace/certified-operators-pfnq9" Nov 22 11:36:15 crc kubenswrapper[4926]: I1122 11:36:15.269280 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gq6d5\" (UniqueName: \"kubernetes.io/projected/5839bc26-c210-445e-b93d-26685aac8332-kube-api-access-gq6d5\") pod \"certified-operators-pfnq9\" (UID: \"5839bc26-c210-445e-b93d-26685aac8332\") " pod="openshift-marketplace/certified-operators-pfnq9" Nov 22 11:36:15 crc kubenswrapper[4926]: I1122 11:36:15.269608 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5839bc26-c210-445e-b93d-26685aac8332-utilities\") pod \"certified-operators-pfnq9\" (UID: \"5839bc26-c210-445e-b93d-26685aac8332\") " pod="openshift-marketplace/certified-operators-pfnq9" Nov 22 11:36:15 crc kubenswrapper[4926]: I1122 11:36:15.269664 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5839bc26-c210-445e-b93d-26685aac8332-catalog-content\") pod \"certified-operators-pfnq9\" (UID: \"5839bc26-c210-445e-b93d-26685aac8332\") " pod="openshift-marketplace/certified-operators-pfnq9" Nov 22 11:36:15 crc kubenswrapper[4926]: I1122 11:36:15.301741 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gq6d5\" (UniqueName: \"kubernetes.io/projected/5839bc26-c210-445e-b93d-26685aac8332-kube-api-access-gq6d5\") pod \"certified-operators-pfnq9\" (UID: \"5839bc26-c210-445e-b93d-26685aac8332\") " pod="openshift-marketplace/certified-operators-pfnq9" Nov 22 11:36:15 crc kubenswrapper[4926]: I1122 11:36:15.391193 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-pfnq9" Nov 22 11:36:15 crc kubenswrapper[4926]: I1122 11:36:15.940634 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-pfnq9"] Nov 22 11:36:16 crc kubenswrapper[4926]: I1122 11:36:16.294201 4926 generic.go:334] "Generic (PLEG): container finished" podID="5839bc26-c210-445e-b93d-26685aac8332" containerID="2924e1e77dd3ff6ce7613346e7643092581a2cb40743e74cf4f14a5569885c49" exitCode=0 Nov 22 11:36:16 crc kubenswrapper[4926]: I1122 11:36:16.294296 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-pfnq9" event={"ID":"5839bc26-c210-445e-b93d-26685aac8332","Type":"ContainerDied","Data":"2924e1e77dd3ff6ce7613346e7643092581a2cb40743e74cf4f14a5569885c49"} Nov 22 11:36:16 crc kubenswrapper[4926]: I1122 11:36:16.294533 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-pfnq9" event={"ID":"5839bc26-c210-445e-b93d-26685aac8332","Type":"ContainerStarted","Data":"3daebcdc66ac5d3f77b0206cdcc21e53bfd3576b01e0fc9fe97937e3d1d50d51"} Nov 22 11:36:17 crc kubenswrapper[4926]: I1122 11:36:17.304702 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-pfnq9" event={"ID":"5839bc26-c210-445e-b93d-26685aac8332","Type":"ContainerStarted","Data":"f366edebb48e29ca06d5105e7f43eb62aaafc2b0be54b3d29079b849f01fbb20"} Nov 22 11:36:18 crc kubenswrapper[4926]: I1122 11:36:18.316844 4926 generic.go:334] "Generic (PLEG): container finished" podID="5839bc26-c210-445e-b93d-26685aac8332" containerID="f366edebb48e29ca06d5105e7f43eb62aaafc2b0be54b3d29079b849f01fbb20" exitCode=0 Nov 22 11:36:18 crc kubenswrapper[4926]: I1122 11:36:18.316963 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-pfnq9" event={"ID":"5839bc26-c210-445e-b93d-26685aac8332","Type":"ContainerDied","Data":"f366edebb48e29ca06d5105e7f43eb62aaafc2b0be54b3d29079b849f01fbb20"} Nov 22 11:36:21 crc kubenswrapper[4926]: I1122 11:36:21.347313 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-pfnq9" event={"ID":"5839bc26-c210-445e-b93d-26685aac8332","Type":"ContainerStarted","Data":"232b96c6ae888acede25380650144e08beb67e47abbd9f8d3fdb16c5d842e1e2"} Nov 22 11:36:21 crc kubenswrapper[4926]: I1122 11:36:21.371060 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-pfnq9" podStartSLOduration=2.364490469 podStartE2EDuration="6.371037874s" podCreationTimestamp="2025-11-22 11:36:15 +0000 UTC" firstStartedPulling="2025-11-22 11:36:16.295560934 +0000 UTC m=+3396.597166221" lastFinishedPulling="2025-11-22 11:36:20.302108339 +0000 UTC m=+3400.603713626" observedRunningTime="2025-11-22 11:36:21.363171538 +0000 UTC m=+3401.664776835" watchObservedRunningTime="2025-11-22 11:36:21.371037874 +0000 UTC m=+3401.672643161" Nov 22 11:36:25 crc kubenswrapper[4926]: I1122 11:36:25.393822 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-pfnq9" Nov 22 11:36:25 crc kubenswrapper[4926]: I1122 11:36:25.394429 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-pfnq9" Nov 22 11:36:25 crc kubenswrapper[4926]: I1122 11:36:25.499276 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" 
pod="openshift-marketplace/certified-operators-pfnq9" Nov 22 11:36:26 crc kubenswrapper[4926]: I1122 11:36:26.477777 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-pfnq9" Nov 22 11:36:26 crc kubenswrapper[4926]: I1122 11:36:26.523515 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-pfnq9"] Nov 22 11:36:28 crc kubenswrapper[4926]: I1122 11:36:28.434774 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-pfnq9" podUID="5839bc26-c210-445e-b93d-26685aac8332" containerName="registry-server" containerID="cri-o://232b96c6ae888acede25380650144e08beb67e47abbd9f8d3fdb16c5d842e1e2" gracePeriod=2 Nov 22 11:36:28 crc kubenswrapper[4926]: I1122 11:36:28.907273 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-pfnq9" Nov 22 11:36:29 crc kubenswrapper[4926]: I1122 11:36:29.028221 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5839bc26-c210-445e-b93d-26685aac8332-catalog-content\") pod \"5839bc26-c210-445e-b93d-26685aac8332\" (UID: \"5839bc26-c210-445e-b93d-26685aac8332\") " Nov 22 11:36:29 crc kubenswrapper[4926]: I1122 11:36:29.028561 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gq6d5\" (UniqueName: \"kubernetes.io/projected/5839bc26-c210-445e-b93d-26685aac8332-kube-api-access-gq6d5\") pod \"5839bc26-c210-445e-b93d-26685aac8332\" (UID: \"5839bc26-c210-445e-b93d-26685aac8332\") " Nov 22 11:36:29 crc kubenswrapper[4926]: I1122 11:36:29.028792 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5839bc26-c210-445e-b93d-26685aac8332-utilities\") pod \"5839bc26-c210-445e-b93d-26685aac8332\" (UID: \"5839bc26-c210-445e-b93d-26685aac8332\") " Nov 22 11:36:29 crc kubenswrapper[4926]: I1122 11:36:29.029393 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5839bc26-c210-445e-b93d-26685aac8332-utilities" (OuterVolumeSpecName: "utilities") pod "5839bc26-c210-445e-b93d-26685aac8332" (UID: "5839bc26-c210-445e-b93d-26685aac8332"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 11:36:29 crc kubenswrapper[4926]: I1122 11:36:29.034602 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5839bc26-c210-445e-b93d-26685aac8332-kube-api-access-gq6d5" (OuterVolumeSpecName: "kube-api-access-gq6d5") pod "5839bc26-c210-445e-b93d-26685aac8332" (UID: "5839bc26-c210-445e-b93d-26685aac8332"). InnerVolumeSpecName "kube-api-access-gq6d5". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 11:36:29 crc kubenswrapper[4926]: I1122 11:36:29.080436 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5839bc26-c210-445e-b93d-26685aac8332-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "5839bc26-c210-445e-b93d-26685aac8332" (UID: "5839bc26-c210-445e-b93d-26685aac8332"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 11:36:29 crc kubenswrapper[4926]: I1122 11:36:29.133073 4926 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5839bc26-c210-445e-b93d-26685aac8332-utilities\") on node \"crc\" DevicePath \"\"" Nov 22 11:36:29 crc kubenswrapper[4926]: I1122 11:36:29.133103 4926 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5839bc26-c210-445e-b93d-26685aac8332-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 22 11:36:29 crc kubenswrapper[4926]: I1122 11:36:29.133114 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gq6d5\" (UniqueName: \"kubernetes.io/projected/5839bc26-c210-445e-b93d-26685aac8332-kube-api-access-gq6d5\") on node \"crc\" DevicePath \"\"" Nov 22 11:36:29 crc kubenswrapper[4926]: I1122 11:36:29.462699 4926 generic.go:334] "Generic (PLEG): container finished" podID="5839bc26-c210-445e-b93d-26685aac8332" containerID="232b96c6ae888acede25380650144e08beb67e47abbd9f8d3fdb16c5d842e1e2" exitCode=0 Nov 22 11:36:29 crc kubenswrapper[4926]: I1122 11:36:29.462754 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-pfnq9" event={"ID":"5839bc26-c210-445e-b93d-26685aac8332","Type":"ContainerDied","Data":"232b96c6ae888acede25380650144e08beb67e47abbd9f8d3fdb16c5d842e1e2"} Nov 22 11:36:29 crc kubenswrapper[4926]: I1122 11:36:29.462789 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-pfnq9" event={"ID":"5839bc26-c210-445e-b93d-26685aac8332","Type":"ContainerDied","Data":"3daebcdc66ac5d3f77b0206cdcc21e53bfd3576b01e0fc9fe97937e3d1d50d51"} Nov 22 11:36:29 crc kubenswrapper[4926]: I1122 11:36:29.462810 4926 scope.go:117] "RemoveContainer" containerID="232b96c6ae888acede25380650144e08beb67e47abbd9f8d3fdb16c5d842e1e2" Nov 22 11:36:29 crc kubenswrapper[4926]: I1122 11:36:29.463022 4926 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-pfnq9" Nov 22 11:36:29 crc kubenswrapper[4926]: I1122 11:36:29.514155 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-pfnq9"] Nov 22 11:36:29 crc kubenswrapper[4926]: I1122 11:36:29.523923 4926 scope.go:117] "RemoveContainer" containerID="f366edebb48e29ca06d5105e7f43eb62aaafc2b0be54b3d29079b849f01fbb20" Nov 22 11:36:29 crc kubenswrapper[4926]: I1122 11:36:29.532499 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-pfnq9"] Nov 22 11:36:29 crc kubenswrapper[4926]: I1122 11:36:29.549836 4926 scope.go:117] "RemoveContainer" containerID="2924e1e77dd3ff6ce7613346e7643092581a2cb40743e74cf4f14a5569885c49" Nov 22 11:36:29 crc kubenswrapper[4926]: I1122 11:36:29.601475 4926 scope.go:117] "RemoveContainer" containerID="232b96c6ae888acede25380650144e08beb67e47abbd9f8d3fdb16c5d842e1e2" Nov 22 11:36:29 crc kubenswrapper[4926]: E1122 11:36:29.602039 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"232b96c6ae888acede25380650144e08beb67e47abbd9f8d3fdb16c5d842e1e2\": container with ID starting with 232b96c6ae888acede25380650144e08beb67e47abbd9f8d3fdb16c5d842e1e2 not found: ID does not exist" containerID="232b96c6ae888acede25380650144e08beb67e47abbd9f8d3fdb16c5d842e1e2" Nov 22 11:36:29 crc kubenswrapper[4926]: I1122 11:36:29.602071 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"232b96c6ae888acede25380650144e08beb67e47abbd9f8d3fdb16c5d842e1e2"} err="failed to get container status \"232b96c6ae888acede25380650144e08beb67e47abbd9f8d3fdb16c5d842e1e2\": rpc error: code = NotFound desc = could not find container \"232b96c6ae888acede25380650144e08beb67e47abbd9f8d3fdb16c5d842e1e2\": container with ID starting with 232b96c6ae888acede25380650144e08beb67e47abbd9f8d3fdb16c5d842e1e2 not found: ID does not exist" Nov 22 11:36:29 crc kubenswrapper[4926]: I1122 11:36:29.602093 4926 scope.go:117] "RemoveContainer" containerID="f366edebb48e29ca06d5105e7f43eb62aaafc2b0be54b3d29079b849f01fbb20" Nov 22 11:36:29 crc kubenswrapper[4926]: E1122 11:36:29.602340 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f366edebb48e29ca06d5105e7f43eb62aaafc2b0be54b3d29079b849f01fbb20\": container with ID starting with f366edebb48e29ca06d5105e7f43eb62aaafc2b0be54b3d29079b849f01fbb20 not found: ID does not exist" containerID="f366edebb48e29ca06d5105e7f43eb62aaafc2b0be54b3d29079b849f01fbb20" Nov 22 11:36:29 crc kubenswrapper[4926]: I1122 11:36:29.602384 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f366edebb48e29ca06d5105e7f43eb62aaafc2b0be54b3d29079b849f01fbb20"} err="failed to get container status \"f366edebb48e29ca06d5105e7f43eb62aaafc2b0be54b3d29079b849f01fbb20\": rpc error: code = NotFound desc = could not find container \"f366edebb48e29ca06d5105e7f43eb62aaafc2b0be54b3d29079b849f01fbb20\": container with ID starting with f366edebb48e29ca06d5105e7f43eb62aaafc2b0be54b3d29079b849f01fbb20 not found: ID does not exist" Nov 22 11:36:29 crc kubenswrapper[4926]: I1122 11:36:29.602398 4926 scope.go:117] "RemoveContainer" containerID="2924e1e77dd3ff6ce7613346e7643092581a2cb40743e74cf4f14a5569885c49" Nov 22 11:36:29 crc kubenswrapper[4926]: E1122 11:36:29.602842 4926 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"2924e1e77dd3ff6ce7613346e7643092581a2cb40743e74cf4f14a5569885c49\": container with ID starting with 2924e1e77dd3ff6ce7613346e7643092581a2cb40743e74cf4f14a5569885c49 not found: ID does not exist" containerID="2924e1e77dd3ff6ce7613346e7643092581a2cb40743e74cf4f14a5569885c49" Nov 22 11:36:29 crc kubenswrapper[4926]: I1122 11:36:29.602866 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2924e1e77dd3ff6ce7613346e7643092581a2cb40743e74cf4f14a5569885c49"} err="failed to get container status \"2924e1e77dd3ff6ce7613346e7643092581a2cb40743e74cf4f14a5569885c49\": rpc error: code = NotFound desc = could not find container \"2924e1e77dd3ff6ce7613346e7643092581a2cb40743e74cf4f14a5569885c49\": container with ID starting with 2924e1e77dd3ff6ce7613346e7643092581a2cb40743e74cf4f14a5569885c49 not found: ID does not exist" Nov 22 11:36:30 crc kubenswrapper[4926]: I1122 11:36:30.601988 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5839bc26-c210-445e-b93d-26685aac8332" path="/var/lib/kubelet/pods/5839bc26-c210-445e-b93d-26685aac8332/volumes" Nov 22 11:36:46 crc kubenswrapper[4926]: I1122 11:36:46.642711 4926 generic.go:334] "Generic (PLEG): container finished" podID="c7a3d06d-0e85-489a-b59f-b152275c0839" containerID="8158ccc4425509d3212cf921e49aae9810eae49fd05ef2fa38e9e382663f5b9b" exitCode=0 Nov 22 11:36:46 crc kubenswrapper[4926]: I1122 11:36:46.642809 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-zcnnb/crc-debug-fjbq8" event={"ID":"c7a3d06d-0e85-489a-b59f-b152275c0839","Type":"ContainerDied","Data":"8158ccc4425509d3212cf921e49aae9810eae49fd05ef2fa38e9e382663f5b9b"} Nov 22 11:36:47 crc kubenswrapper[4926]: I1122 11:36:47.775539 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-zcnnb/crc-debug-fjbq8" Nov 22 11:36:47 crc kubenswrapper[4926]: I1122 11:36:47.819133 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-zcnnb/crc-debug-fjbq8"] Nov 22 11:36:47 crc kubenswrapper[4926]: I1122 11:36:47.827789 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-zcnnb/crc-debug-fjbq8"] Nov 22 11:36:47 crc kubenswrapper[4926]: I1122 11:36:47.919337 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/c7a3d06d-0e85-489a-b59f-b152275c0839-host\") pod \"c7a3d06d-0e85-489a-b59f-b152275c0839\" (UID: \"c7a3d06d-0e85-489a-b59f-b152275c0839\") " Nov 22 11:36:47 crc kubenswrapper[4926]: I1122 11:36:47.919461 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-82dcm\" (UniqueName: \"kubernetes.io/projected/c7a3d06d-0e85-489a-b59f-b152275c0839-kube-api-access-82dcm\") pod \"c7a3d06d-0e85-489a-b59f-b152275c0839\" (UID: \"c7a3d06d-0e85-489a-b59f-b152275c0839\") " Nov 22 11:36:47 crc kubenswrapper[4926]: I1122 11:36:47.919600 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/c7a3d06d-0e85-489a-b59f-b152275c0839-host" (OuterVolumeSpecName: "host") pod "c7a3d06d-0e85-489a-b59f-b152275c0839" (UID: "c7a3d06d-0e85-489a-b59f-b152275c0839"). InnerVolumeSpecName "host". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 22 11:36:47 crc kubenswrapper[4926]: I1122 11:36:47.920029 4926 reconciler_common.go:293] "Volume detached for volume \"host\" (UniqueName: \"kubernetes.io/host-path/c7a3d06d-0e85-489a-b59f-b152275c0839-host\") on node \"crc\" DevicePath \"\"" Nov 22 11:36:47 crc kubenswrapper[4926]: I1122 11:36:47.927107 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c7a3d06d-0e85-489a-b59f-b152275c0839-kube-api-access-82dcm" (OuterVolumeSpecName: "kube-api-access-82dcm") pod "c7a3d06d-0e85-489a-b59f-b152275c0839" (UID: "c7a3d06d-0e85-489a-b59f-b152275c0839"). InnerVolumeSpecName "kube-api-access-82dcm". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 11:36:48 crc kubenswrapper[4926]: I1122 11:36:48.022676 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-82dcm\" (UniqueName: \"kubernetes.io/projected/c7a3d06d-0e85-489a-b59f-b152275c0839-kube-api-access-82dcm\") on node \"crc\" DevicePath \"\"" Nov 22 11:36:48 crc kubenswrapper[4926]: I1122 11:36:48.603513 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c7a3d06d-0e85-489a-b59f-b152275c0839" path="/var/lib/kubelet/pods/c7a3d06d-0e85-489a-b59f-b152275c0839/volumes" Nov 22 11:36:48 crc kubenswrapper[4926]: I1122 11:36:48.663527 4926 scope.go:117] "RemoveContainer" containerID="8158ccc4425509d3212cf921e49aae9810eae49fd05ef2fa38e9e382663f5b9b" Nov 22 11:36:48 crc kubenswrapper[4926]: I1122 11:36:48.663680 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-zcnnb/crc-debug-fjbq8" Nov 22 11:36:49 crc kubenswrapper[4926]: I1122 11:36:49.000695 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-zcnnb/crc-debug-v4hfz"] Nov 22 11:36:49 crc kubenswrapper[4926]: E1122 11:36:49.001910 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5839bc26-c210-445e-b93d-26685aac8332" containerName="extract-utilities" Nov 22 11:36:49 crc kubenswrapper[4926]: I1122 11:36:49.001935 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="5839bc26-c210-445e-b93d-26685aac8332" containerName="extract-utilities" Nov 22 11:36:49 crc kubenswrapper[4926]: E1122 11:36:49.001967 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c7a3d06d-0e85-489a-b59f-b152275c0839" containerName="container-00" Nov 22 11:36:49 crc kubenswrapper[4926]: I1122 11:36:49.001978 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="c7a3d06d-0e85-489a-b59f-b152275c0839" containerName="container-00" Nov 22 11:36:49 crc kubenswrapper[4926]: E1122 11:36:49.002020 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5839bc26-c210-445e-b93d-26685aac8332" containerName="registry-server" Nov 22 11:36:49 crc kubenswrapper[4926]: I1122 11:36:49.002033 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="5839bc26-c210-445e-b93d-26685aac8332" containerName="registry-server" Nov 22 11:36:49 crc kubenswrapper[4926]: E1122 11:36:49.002067 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5839bc26-c210-445e-b93d-26685aac8332" containerName="extract-content" Nov 22 11:36:49 crc kubenswrapper[4926]: I1122 11:36:49.002077 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="5839bc26-c210-445e-b93d-26685aac8332" containerName="extract-content" Nov 22 11:36:49 crc kubenswrapper[4926]: I1122 11:36:49.002421 4926 memory_manager.go:354] "RemoveStaleState removing state" 
podUID="c7a3d06d-0e85-489a-b59f-b152275c0839" containerName="container-00" Nov 22 11:36:49 crc kubenswrapper[4926]: I1122 11:36:49.002447 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="5839bc26-c210-445e-b93d-26685aac8332" containerName="registry-server" Nov 22 11:36:49 crc kubenswrapper[4926]: I1122 11:36:49.003524 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-zcnnb/crc-debug-v4hfz" Nov 22 11:36:49 crc kubenswrapper[4926]: I1122 11:36:49.043637 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hjzkr\" (UniqueName: \"kubernetes.io/projected/3a384bb2-d820-494f-9aa3-ff62b7c7945b-kube-api-access-hjzkr\") pod \"crc-debug-v4hfz\" (UID: \"3a384bb2-d820-494f-9aa3-ff62b7c7945b\") " pod="openshift-must-gather-zcnnb/crc-debug-v4hfz" Nov 22 11:36:49 crc kubenswrapper[4926]: I1122 11:36:49.043700 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/3a384bb2-d820-494f-9aa3-ff62b7c7945b-host\") pod \"crc-debug-v4hfz\" (UID: \"3a384bb2-d820-494f-9aa3-ff62b7c7945b\") " pod="openshift-must-gather-zcnnb/crc-debug-v4hfz" Nov 22 11:36:49 crc kubenswrapper[4926]: I1122 11:36:49.145433 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hjzkr\" (UniqueName: \"kubernetes.io/projected/3a384bb2-d820-494f-9aa3-ff62b7c7945b-kube-api-access-hjzkr\") pod \"crc-debug-v4hfz\" (UID: \"3a384bb2-d820-494f-9aa3-ff62b7c7945b\") " pod="openshift-must-gather-zcnnb/crc-debug-v4hfz" Nov 22 11:36:49 crc kubenswrapper[4926]: I1122 11:36:49.145495 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/3a384bb2-d820-494f-9aa3-ff62b7c7945b-host\") pod \"crc-debug-v4hfz\" (UID: \"3a384bb2-d820-494f-9aa3-ff62b7c7945b\") " pod="openshift-must-gather-zcnnb/crc-debug-v4hfz" Nov 22 11:36:49 crc kubenswrapper[4926]: I1122 11:36:49.145706 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/3a384bb2-d820-494f-9aa3-ff62b7c7945b-host\") pod \"crc-debug-v4hfz\" (UID: \"3a384bb2-d820-494f-9aa3-ff62b7c7945b\") " pod="openshift-must-gather-zcnnb/crc-debug-v4hfz" Nov 22 11:36:49 crc kubenswrapper[4926]: I1122 11:36:49.165605 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hjzkr\" (UniqueName: \"kubernetes.io/projected/3a384bb2-d820-494f-9aa3-ff62b7c7945b-kube-api-access-hjzkr\") pod \"crc-debug-v4hfz\" (UID: \"3a384bb2-d820-494f-9aa3-ff62b7c7945b\") " pod="openshift-must-gather-zcnnb/crc-debug-v4hfz" Nov 22 11:36:49 crc kubenswrapper[4926]: I1122 11:36:49.329931 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-zcnnb/crc-debug-v4hfz" Nov 22 11:36:49 crc kubenswrapper[4926]: I1122 11:36:49.675176 4926 generic.go:334] "Generic (PLEG): container finished" podID="3a384bb2-d820-494f-9aa3-ff62b7c7945b" containerID="4369c5d8adbf92ce506e78f85dc4af27322696fa9c2bbdc5d286bc88b83af0dc" exitCode=0 Nov 22 11:36:49 crc kubenswrapper[4926]: I1122 11:36:49.675267 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-zcnnb/crc-debug-v4hfz" event={"ID":"3a384bb2-d820-494f-9aa3-ff62b7c7945b","Type":"ContainerDied","Data":"4369c5d8adbf92ce506e78f85dc4af27322696fa9c2bbdc5d286bc88b83af0dc"} Nov 22 11:36:49 crc kubenswrapper[4926]: I1122 11:36:49.675586 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-zcnnb/crc-debug-v4hfz" event={"ID":"3a384bb2-d820-494f-9aa3-ff62b7c7945b","Type":"ContainerStarted","Data":"a9fe867ebaf3cd5f469f00fc5d5a4829585167ed497dc9b3e35e4f4b11240b8b"} Nov 22 11:36:50 crc kubenswrapper[4926]: I1122 11:36:50.165285 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-zcnnb/crc-debug-v4hfz"] Nov 22 11:36:50 crc kubenswrapper[4926]: I1122 11:36:50.172436 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-zcnnb/crc-debug-v4hfz"] Nov 22 11:36:50 crc kubenswrapper[4926]: I1122 11:36:50.793657 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-zcnnb/crc-debug-v4hfz" Nov 22 11:36:50 crc kubenswrapper[4926]: I1122 11:36:50.879612 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/3a384bb2-d820-494f-9aa3-ff62b7c7945b-host\") pod \"3a384bb2-d820-494f-9aa3-ff62b7c7945b\" (UID: \"3a384bb2-d820-494f-9aa3-ff62b7c7945b\") " Nov 22 11:36:50 crc kubenswrapper[4926]: I1122 11:36:50.879752 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/3a384bb2-d820-494f-9aa3-ff62b7c7945b-host" (OuterVolumeSpecName: "host") pod "3a384bb2-d820-494f-9aa3-ff62b7c7945b" (UID: "3a384bb2-d820-494f-9aa3-ff62b7c7945b"). InnerVolumeSpecName "host". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 22 11:36:50 crc kubenswrapper[4926]: I1122 11:36:50.879793 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hjzkr\" (UniqueName: \"kubernetes.io/projected/3a384bb2-d820-494f-9aa3-ff62b7c7945b-kube-api-access-hjzkr\") pod \"3a384bb2-d820-494f-9aa3-ff62b7c7945b\" (UID: \"3a384bb2-d820-494f-9aa3-ff62b7c7945b\") " Nov 22 11:36:50 crc kubenswrapper[4926]: I1122 11:36:50.880618 4926 reconciler_common.go:293] "Volume detached for volume \"host\" (UniqueName: \"kubernetes.io/host-path/3a384bb2-d820-494f-9aa3-ff62b7c7945b-host\") on node \"crc\" DevicePath \"\"" Nov 22 11:36:50 crc kubenswrapper[4926]: I1122 11:36:50.901210 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3a384bb2-d820-494f-9aa3-ff62b7c7945b-kube-api-access-hjzkr" (OuterVolumeSpecName: "kube-api-access-hjzkr") pod "3a384bb2-d820-494f-9aa3-ff62b7c7945b" (UID: "3a384bb2-d820-494f-9aa3-ff62b7c7945b"). InnerVolumeSpecName "kube-api-access-hjzkr". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 11:36:50 crc kubenswrapper[4926]: I1122 11:36:50.982735 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hjzkr\" (UniqueName: \"kubernetes.io/projected/3a384bb2-d820-494f-9aa3-ff62b7c7945b-kube-api-access-hjzkr\") on node \"crc\" DevicePath \"\"" Nov 22 11:36:51 crc kubenswrapper[4926]: I1122 11:36:51.435416 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-zcnnb/crc-debug-x6c84"] Nov 22 11:36:51 crc kubenswrapper[4926]: E1122 11:36:51.435913 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3a384bb2-d820-494f-9aa3-ff62b7c7945b" containerName="container-00" Nov 22 11:36:51 crc kubenswrapper[4926]: I1122 11:36:51.435930 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="3a384bb2-d820-494f-9aa3-ff62b7c7945b" containerName="container-00" Nov 22 11:36:51 crc kubenswrapper[4926]: I1122 11:36:51.436200 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="3a384bb2-d820-494f-9aa3-ff62b7c7945b" containerName="container-00" Nov 22 11:36:51 crc kubenswrapper[4926]: I1122 11:36:51.437107 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-zcnnb/crc-debug-x6c84" Nov 22 11:36:51 crc kubenswrapper[4926]: I1122 11:36:51.494186 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/18ac4523-f3e1-4a47-9e4b-b62e8c09b961-host\") pod \"crc-debug-x6c84\" (UID: \"18ac4523-f3e1-4a47-9e4b-b62e8c09b961\") " pod="openshift-must-gather-zcnnb/crc-debug-x6c84" Nov 22 11:36:51 crc kubenswrapper[4926]: I1122 11:36:51.494250 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xj96k\" (UniqueName: \"kubernetes.io/projected/18ac4523-f3e1-4a47-9e4b-b62e8c09b961-kube-api-access-xj96k\") pod \"crc-debug-x6c84\" (UID: \"18ac4523-f3e1-4a47-9e4b-b62e8c09b961\") " pod="openshift-must-gather-zcnnb/crc-debug-x6c84" Nov 22 11:36:51 crc kubenswrapper[4926]: I1122 11:36:51.596172 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/18ac4523-f3e1-4a47-9e4b-b62e8c09b961-host\") pod \"crc-debug-x6c84\" (UID: \"18ac4523-f3e1-4a47-9e4b-b62e8c09b961\") " pod="openshift-must-gather-zcnnb/crc-debug-x6c84" Nov 22 11:36:51 crc kubenswrapper[4926]: I1122 11:36:51.596287 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xj96k\" (UniqueName: \"kubernetes.io/projected/18ac4523-f3e1-4a47-9e4b-b62e8c09b961-kube-api-access-xj96k\") pod \"crc-debug-x6c84\" (UID: \"18ac4523-f3e1-4a47-9e4b-b62e8c09b961\") " pod="openshift-must-gather-zcnnb/crc-debug-x6c84" Nov 22 11:36:51 crc kubenswrapper[4926]: I1122 11:36:51.596438 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/18ac4523-f3e1-4a47-9e4b-b62e8c09b961-host\") pod \"crc-debug-x6c84\" (UID: \"18ac4523-f3e1-4a47-9e4b-b62e8c09b961\") " pod="openshift-must-gather-zcnnb/crc-debug-x6c84" Nov 22 11:36:51 crc kubenswrapper[4926]: I1122 11:36:51.615591 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xj96k\" (UniqueName: \"kubernetes.io/projected/18ac4523-f3e1-4a47-9e4b-b62e8c09b961-kube-api-access-xj96k\") pod \"crc-debug-x6c84\" (UID: \"18ac4523-f3e1-4a47-9e4b-b62e8c09b961\") " 
pod="openshift-must-gather-zcnnb/crc-debug-x6c84" Nov 22 11:36:51 crc kubenswrapper[4926]: I1122 11:36:51.701930 4926 scope.go:117] "RemoveContainer" containerID="4369c5d8adbf92ce506e78f85dc4af27322696fa9c2bbdc5d286bc88b83af0dc" Nov 22 11:36:51 crc kubenswrapper[4926]: I1122 11:36:51.702140 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-zcnnb/crc-debug-v4hfz" Nov 22 11:36:51 crc kubenswrapper[4926]: I1122 11:36:51.760028 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-zcnnb/crc-debug-x6c84" Nov 22 11:36:51 crc kubenswrapper[4926]: W1122 11:36:51.796109 4926 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod18ac4523_f3e1_4a47_9e4b_b62e8c09b961.slice/crio-cf768646b911957507479611cbeb1872ce6779738087ecb452cd8a4c2692686c WatchSource:0}: Error finding container cf768646b911957507479611cbeb1872ce6779738087ecb452cd8a4c2692686c: Status 404 returned error can't find the container with id cf768646b911957507479611cbeb1872ce6779738087ecb452cd8a4c2692686c Nov 22 11:36:52 crc kubenswrapper[4926]: I1122 11:36:52.594396 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3a384bb2-d820-494f-9aa3-ff62b7c7945b" path="/var/lib/kubelet/pods/3a384bb2-d820-494f-9aa3-ff62b7c7945b/volumes" Nov 22 11:36:52 crc kubenswrapper[4926]: I1122 11:36:52.715403 4926 generic.go:334] "Generic (PLEG): container finished" podID="18ac4523-f3e1-4a47-9e4b-b62e8c09b961" containerID="29f610513c7fd1914f987759e8788f6cd6ee60460674eea447483bf8c8b7d06e" exitCode=0 Nov 22 11:36:52 crc kubenswrapper[4926]: I1122 11:36:52.715469 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-zcnnb/crc-debug-x6c84" event={"ID":"18ac4523-f3e1-4a47-9e4b-b62e8c09b961","Type":"ContainerDied","Data":"29f610513c7fd1914f987759e8788f6cd6ee60460674eea447483bf8c8b7d06e"} Nov 22 11:36:52 crc kubenswrapper[4926]: I1122 11:36:52.715505 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-zcnnb/crc-debug-x6c84" event={"ID":"18ac4523-f3e1-4a47-9e4b-b62e8c09b961","Type":"ContainerStarted","Data":"cf768646b911957507479611cbeb1872ce6779738087ecb452cd8a4c2692686c"} Nov 22 11:36:52 crc kubenswrapper[4926]: I1122 11:36:52.763906 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-zcnnb/crc-debug-x6c84"] Nov 22 11:36:52 crc kubenswrapper[4926]: I1122 11:36:52.772257 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-zcnnb/crc-debug-x6c84"] Nov 22 11:36:53 crc kubenswrapper[4926]: I1122 11:36:53.841798 4926 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-zcnnb/crc-debug-x6c84" Nov 22 11:36:53 crc kubenswrapper[4926]: I1122 11:36:53.944964 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xj96k\" (UniqueName: \"kubernetes.io/projected/18ac4523-f3e1-4a47-9e4b-b62e8c09b961-kube-api-access-xj96k\") pod \"18ac4523-f3e1-4a47-9e4b-b62e8c09b961\" (UID: \"18ac4523-f3e1-4a47-9e4b-b62e8c09b961\") " Nov 22 11:36:53 crc kubenswrapper[4926]: I1122 11:36:53.945164 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/18ac4523-f3e1-4a47-9e4b-b62e8c09b961-host\") pod \"18ac4523-f3e1-4a47-9e4b-b62e8c09b961\" (UID: \"18ac4523-f3e1-4a47-9e4b-b62e8c09b961\") " Nov 22 11:36:53 crc kubenswrapper[4926]: I1122 11:36:53.945207 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/18ac4523-f3e1-4a47-9e4b-b62e8c09b961-host" (OuterVolumeSpecName: "host") pod "18ac4523-f3e1-4a47-9e4b-b62e8c09b961" (UID: "18ac4523-f3e1-4a47-9e4b-b62e8c09b961"). InnerVolumeSpecName "host". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 22 11:36:53 crc kubenswrapper[4926]: I1122 11:36:53.945553 4926 reconciler_common.go:293] "Volume detached for volume \"host\" (UniqueName: \"kubernetes.io/host-path/18ac4523-f3e1-4a47-9e4b-b62e8c09b961-host\") on node \"crc\" DevicePath \"\"" Nov 22 11:36:53 crc kubenswrapper[4926]: I1122 11:36:53.959877 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/18ac4523-f3e1-4a47-9e4b-b62e8c09b961-kube-api-access-xj96k" (OuterVolumeSpecName: "kube-api-access-xj96k") pod "18ac4523-f3e1-4a47-9e4b-b62e8c09b961" (UID: "18ac4523-f3e1-4a47-9e4b-b62e8c09b961"). InnerVolumeSpecName "kube-api-access-xj96k". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 11:36:54 crc kubenswrapper[4926]: I1122 11:36:54.047469 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xj96k\" (UniqueName: \"kubernetes.io/projected/18ac4523-f3e1-4a47-9e4b-b62e8c09b961-kube-api-access-xj96k\") on node \"crc\" DevicePath \"\"" Nov 22 11:36:54 crc kubenswrapper[4926]: I1122 11:36:54.594629 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="18ac4523-f3e1-4a47-9e4b-b62e8c09b961" path="/var/lib/kubelet/pods/18ac4523-f3e1-4a47-9e4b-b62e8c09b961/volumes" Nov 22 11:36:54 crc kubenswrapper[4926]: I1122 11:36:54.736367 4926 scope.go:117] "RemoveContainer" containerID="29f610513c7fd1914f987759e8788f6cd6ee60460674eea447483bf8c8b7d06e" Nov 22 11:36:54 crc kubenswrapper[4926]: I1122 11:36:54.736393 4926 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-zcnnb/crc-debug-x6c84" Nov 22 11:37:07 crc kubenswrapper[4926]: I1122 11:37:07.736555 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-api-d84d6cb4b-w4kcl_d0482fa7-3e3e-4c94-8bb4-76e4aa4f1d7b/barbican-api/0.log" Nov 22 11:37:07 crc kubenswrapper[4926]: I1122 11:37:07.921508 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-api-d84d6cb4b-w4kcl_d0482fa7-3e3e-4c94-8bb4-76e4aa4f1d7b/barbican-api-log/0.log" Nov 22 11:37:07 crc kubenswrapper[4926]: I1122 11:37:07.997213 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-keystone-listener-5b6fb59ff8-cgr6h_5659fc64-a862-4d05-989e-4e667a4bb792/barbican-keystone-listener/0.log" Nov 22 11:37:08 crc kubenswrapper[4926]: I1122 11:37:08.022571 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-keystone-listener-5b6fb59ff8-cgr6h_5659fc64-a862-4d05-989e-4e667a4bb792/barbican-keystone-listener-log/0.log" Nov 22 11:37:08 crc kubenswrapper[4926]: I1122 11:37:08.182839 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-worker-6c6596fd55-5fshh_58c72eaf-f8f2-4333-8057-a9237457d73c/barbican-worker/0.log" Nov 22 11:37:08 crc kubenswrapper[4926]: I1122 11:37:08.212706 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-worker-6c6596fd55-5fshh_58c72eaf-f8f2-4333-8057-a9237457d73c/barbican-worker-log/0.log" Nov 22 11:37:08 crc kubenswrapper[4926]: I1122 11:37:08.455685 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_bootstrap-edpm-deployment-openstack-edpm-ipam-ch87n_9189297a-e5e2-47b3-9cf0-ac932c80f3bb/bootstrap-edpm-deployment-openstack-edpm-ipam/0.log" Nov 22 11:37:08 crc kubenswrapper[4926]: I1122 11:37:08.488926 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_5e5f48cd-d405-4431-8ab1-de058f7c0f52/ceilometer-central-agent/0.log" Nov 22 11:37:08 crc kubenswrapper[4926]: I1122 11:37:08.499799 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_5e5f48cd-d405-4431-8ab1-de058f7c0f52/ceilometer-notification-agent/0.log" Nov 22 11:37:08 crc kubenswrapper[4926]: I1122 11:37:08.612737 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_5e5f48cd-d405-4431-8ab1-de058f7c0f52/sg-core/0.log" Nov 22 11:37:08 crc kubenswrapper[4926]: I1122 11:37:08.625920 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_5e5f48cd-d405-4431-8ab1-de058f7c0f52/proxy-httpd/0.log" Nov 22 11:37:08 crc kubenswrapper[4926]: I1122 11:37:08.743728 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-api-0_8232e2d5-3714-47a4-9739-2e370a17300b/cinder-api/0.log" Nov 22 11:37:08 crc kubenswrapper[4926]: I1122 11:37:08.824977 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-api-0_8232e2d5-3714-47a4-9739-2e370a17300b/cinder-api-log/0.log" Nov 22 11:37:08 crc kubenswrapper[4926]: I1122 11:37:08.915523 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-scheduler-0_e8c6c748-bba9-4298-b0de-745cd26ccec4/cinder-scheduler/0.log" Nov 22 11:37:08 crc kubenswrapper[4926]: I1122 11:37:08.949471 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-scheduler-0_e8c6c748-bba9-4298-b0de-745cd26ccec4/probe/0.log" Nov 22 11:37:09 crc kubenswrapper[4926]: I1122 11:37:09.101333 4926 log.go:25] "Finished 
parsing log file" path="/var/log/pods/openstack_configure-network-edpm-deployment-openstack-edpm-ipam-76jbg_ab012855-82a0-4f87-97a7-e3c2d1490dda/configure-network-edpm-deployment-openstack-edpm-ipam/0.log" Nov 22 11:37:09 crc kubenswrapper[4926]: I1122 11:37:09.161357 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_configure-os-edpm-deployment-openstack-edpm-ipam-mk5df_e048ed0c-3971-4ed9-ba3a-ea97d4cd82fb/configure-os-edpm-deployment-openstack-edpm-ipam/0.log" Nov 22 11:37:09 crc kubenswrapper[4926]: I1122 11:37:09.283012 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_dnsmasq-dns-8c6f6df99-m2jnh_17f914e9-40ef-4428-817c-6f72279f844f/init/0.log" Nov 22 11:37:09 crc kubenswrapper[4926]: I1122 11:37:09.475143 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_dnsmasq-dns-8c6f6df99-m2jnh_17f914e9-40ef-4428-817c-6f72279f844f/init/0.log" Nov 22 11:37:09 crc kubenswrapper[4926]: I1122 11:37:09.512422 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_download-cache-edpm-deployment-openstack-edpm-ipam-7v496_fae080f1-2e5d-463a-ae8e-0c29025a62a3/download-cache-edpm-deployment-openstack-edpm-ipam/0.log" Nov 22 11:37:09 crc kubenswrapper[4926]: I1122 11:37:09.551744 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_dnsmasq-dns-8c6f6df99-m2jnh_17f914e9-40ef-4428-817c-6f72279f844f/dnsmasq-dns/0.log" Nov 22 11:37:09 crc kubenswrapper[4926]: I1122 11:37:09.677839 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-external-api-0_699ad142-80cd-4ee2-86ca-87c22cc7f39b/glance-httpd/0.log" Nov 22 11:37:09 crc kubenswrapper[4926]: I1122 11:37:09.768505 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-external-api-0_699ad142-80cd-4ee2-86ca-87c22cc7f39b/glance-log/0.log" Nov 22 11:37:09 crc kubenswrapper[4926]: I1122 11:37:09.861070 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-internal-api-0_eaf541eb-314b-4f78-bdcc-66f5b43b0ed5/glance-log/0.log" Nov 22 11:37:09 crc kubenswrapper[4926]: I1122 11:37:09.903479 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-internal-api-0_eaf541eb-314b-4f78-bdcc-66f5b43b0ed5/glance-httpd/0.log" Nov 22 11:37:10 crc kubenswrapper[4926]: I1122 11:37:10.019297 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_horizon-86dd5d599b-jndzq_a7c08c13-5c9c-42ac-8fdc-e651c26d97fc/horizon/0.log" Nov 22 11:37:10 crc kubenswrapper[4926]: I1122 11:37:10.209951 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_install-certs-edpm-deployment-openstack-edpm-ipam-gqff6_f456f1f9-7676-4426-810c-6057111ed942/install-certs-edpm-deployment-openstack-edpm-ipam/0.log" Nov 22 11:37:10 crc kubenswrapper[4926]: I1122 11:37:10.315325 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_horizon-86dd5d599b-jndzq_a7c08c13-5c9c-42ac-8fdc-e651c26d97fc/horizon-log/0.log" Nov 22 11:37:10 crc kubenswrapper[4926]: I1122 11:37:10.398562 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_install-os-edpm-deployment-openstack-edpm-ipam-jv8ww_4cd61881-efff-46ae-a9b8-ba641538d8e1/install-os-edpm-deployment-openstack-edpm-ipam/0.log" Nov 22 11:37:10 crc kubenswrapper[4926]: I1122 11:37:10.594702 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_keystone-68769dd845-84s2z_3c8571ff-d236-4cc6-aebe-ffa8be3ef604/keystone-api/0.log" Nov 22 
11:37:10 crc kubenswrapper[4926]: I1122 11:37:10.611632 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_keystone-cron-29396821-jtvch_a8024291-de1f-49c8-bac5-b4d37978639d/keystone-cron/0.log" Nov 22 11:37:10 crc kubenswrapper[4926]: I1122 11:37:10.770192 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_kube-state-metrics-0_a5fc01d6-133f-4899-926b-3e4ff8c68f0b/kube-state-metrics/0.log" Nov 22 11:37:10 crc kubenswrapper[4926]: I1122 11:37:10.805853 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_libvirt-edpm-deployment-openstack-edpm-ipam-s4zsl_64e25bf4-8746-413f-a28b-264ddfb9feff/libvirt-edpm-deployment-openstack-edpm-ipam/0.log" Nov 22 11:37:11 crc kubenswrapper[4926]: I1122 11:37:11.133399 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-5f7c4dcf85-jl8kd_3260200f-bc21-4521-9a62-2f67ab26f0df/neutron-api/0.log" Nov 22 11:37:11 crc kubenswrapper[4926]: I1122 11:37:11.230400 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-5f7c4dcf85-jl8kd_3260200f-bc21-4521-9a62-2f67ab26f0df/neutron-httpd/0.log" Nov 22 11:37:11 crc kubenswrapper[4926]: I1122 11:37:11.310001 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-metadata-edpm-deployment-openstack-edpm-ipam-f5tdc_e65a3423-36b6-48c5-b170-989f64801105/neutron-metadata-edpm-deployment-openstack-edpm-ipam/0.log" Nov 22 11:37:11 crc kubenswrapper[4926]: I1122 11:37:11.764654 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-cell0-conductor-0_cb0852ee-dc75-43ee-88ec-7343197eca5f/nova-cell0-conductor-conductor/0.log" Nov 22 11:37:11 crc kubenswrapper[4926]: I1122 11:37:11.810459 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-api-0_95e7c80b-edf7-42be-892c-11557c816271/nova-api-log/0.log" Nov 22 11:37:12 crc kubenswrapper[4926]: I1122 11:37:12.098124 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-api-0_95e7c80b-edf7-42be-892c-11557c816271/nova-api-api/0.log" Nov 22 11:37:12 crc kubenswrapper[4926]: I1122 11:37:12.134841 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-cell1-conductor-0_cea60836-1c25-4c6c-8f9e-e64ab97d459a/nova-cell1-conductor-conductor/0.log" Nov 22 11:37:12 crc kubenswrapper[4926]: I1122 11:37:12.147536 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-cell1-novncproxy-0_ccc7d0f8-4767-44f3-8dfa-d83b48fc23a2/nova-cell1-novncproxy-novncproxy/0.log" Nov 22 11:37:12 crc kubenswrapper[4926]: I1122 11:37:12.409289 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-edpm-deployment-openstack-edpm-ipam-554th_61e72bc5-b152-4df1-95ee-bb47a81514ff/nova-edpm-deployment-openstack-edpm-ipam/0.log" Nov 22 11:37:12 crc kubenswrapper[4926]: I1122 11:37:12.434469 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-metadata-0_468e2351-8b2d-4e90-bf03-218570d63fd9/nova-metadata-log/0.log" Nov 22 11:37:12 crc kubenswrapper[4926]: I1122 11:37:12.759547 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-scheduler-0_360b8f0a-6a7b-4772-839f-cab107433443/nova-scheduler-scheduler/0.log" Nov 22 11:37:12 crc kubenswrapper[4926]: I1122 11:37:12.834063 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-cell1-galera-0_01026c46-6589-4761-80f4-8bb210d71fd9/mysql-bootstrap/0.log" Nov 22 11:37:13 crc kubenswrapper[4926]: I1122 11:37:13.076173 
4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-cell1-galera-0_01026c46-6589-4761-80f4-8bb210d71fd9/mysql-bootstrap/0.log" Nov 22 11:37:13 crc kubenswrapper[4926]: I1122 11:37:13.095863 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-cell1-galera-0_01026c46-6589-4761-80f4-8bb210d71fd9/galera/0.log" Nov 22 11:37:13 crc kubenswrapper[4926]: I1122 11:37:13.298478 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-galera-0_94fdd08c-2339-4d12-90bf-fbd407185f34/mysql-bootstrap/0.log" Nov 22 11:37:13 crc kubenswrapper[4926]: I1122 11:37:13.473439 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-galera-0_94fdd08c-2339-4d12-90bf-fbd407185f34/mysql-bootstrap/0.log" Nov 22 11:37:13 crc kubenswrapper[4926]: I1122 11:37:13.517053 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-galera-0_94fdd08c-2339-4d12-90bf-fbd407185f34/galera/0.log" Nov 22 11:37:13 crc kubenswrapper[4926]: I1122 11:37:13.520261 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-metadata-0_468e2351-8b2d-4e90-bf03-218570d63fd9/nova-metadata-metadata/0.log" Nov 22 11:37:13 crc kubenswrapper[4926]: I1122 11:37:13.717505 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstackclient_6c186926-85fd-4c52-9910-48a3c70ae9eb/openstackclient/0.log" Nov 22 11:37:13 crc kubenswrapper[4926]: I1122 11:37:13.734352 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-metrics-fvp8n_d2154a83-1eaa-44bc-ade8-754245e919b2/openstack-network-exporter/0.log" Nov 22 11:37:13 crc kubenswrapper[4926]: I1122 11:37:13.934190 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-hrnx7_c35b6e33-72ea-4631-8fb0-e21ed5b6b503/ovsdb-server-init/0.log" Nov 22 11:37:14 crc kubenswrapper[4926]: I1122 11:37:14.101007 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-hrnx7_c35b6e33-72ea-4631-8fb0-e21ed5b6b503/ovsdb-server-init/0.log" Nov 22 11:37:14 crc kubenswrapper[4926]: I1122 11:37:14.121168 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-hrnx7_c35b6e33-72ea-4631-8fb0-e21ed5b6b503/ovs-vswitchd/0.log" Nov 22 11:37:14 crc kubenswrapper[4926]: I1122 11:37:14.122157 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-hrnx7_c35b6e33-72ea-4631-8fb0-e21ed5b6b503/ovsdb-server/0.log" Nov 22 11:37:14 crc kubenswrapper[4926]: I1122 11:37:14.256872 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-pwfdl_631757e2-e40e-4cc6-a2a3-601c749669b2/ovn-controller/0.log" Nov 22 11:37:14 crc kubenswrapper[4926]: I1122 11:37:14.367106 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-edpm-deployment-openstack-edpm-ipam-xgx5g_060a3d68-c5b3-4788-8c63-ce0b6d67acc5/ovn-edpm-deployment-openstack-edpm-ipam/0.log" Nov 22 11:37:14 crc kubenswrapper[4926]: I1122 11:37:14.507526 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-northd-0_d90df493-f9a0-4774-bd2e-6b96bbfebf31/openstack-network-exporter/0.log" Nov 22 11:37:14 crc kubenswrapper[4926]: I1122 11:37:14.601590 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-northd-0_d90df493-f9a0-4774-bd2e-6b96bbfebf31/ovn-northd/0.log" Nov 22 11:37:14 crc kubenswrapper[4926]: I1122 11:37:14.720036 4926 
log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-0_50a6898f-08ef-48de-bcc5-35b49915cff6/ovsdbserver-nb/0.log" Nov 22 11:37:14 crc kubenswrapper[4926]: I1122 11:37:14.727249 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-0_50a6898f-08ef-48de-bcc5-35b49915cff6/openstack-network-exporter/0.log" Nov 22 11:37:14 crc kubenswrapper[4926]: I1122 11:37:14.872357 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-0_49d1aa9f-06e0-48b0-b0a7-ab459f6ed4d0/openstack-network-exporter/0.log" Nov 22 11:37:14 crc kubenswrapper[4926]: I1122 11:37:14.940625 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-0_49d1aa9f-06e0-48b0-b0a7-ab459f6ed4d0/ovsdbserver-sb/0.log" Nov 22 11:37:15 crc kubenswrapper[4926]: I1122 11:37:15.130290 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_placement-65b67ff7d-d2fkp_c5fcfb96-741e-467c-971f-762618aa54d5/placement-api/0.log" Nov 22 11:37:15 crc kubenswrapper[4926]: I1122 11:37:15.151474 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-cell1-server-0_5bd13931-4b28-4235-a779-aea2a515351e/setup-container/0.log" Nov 22 11:37:15 crc kubenswrapper[4926]: I1122 11:37:15.154894 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_placement-65b67ff7d-d2fkp_c5fcfb96-741e-467c-971f-762618aa54d5/placement-log/0.log" Nov 22 11:37:15 crc kubenswrapper[4926]: I1122 11:37:15.396910 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-cell1-server-0_5bd13931-4b28-4235-a779-aea2a515351e/rabbitmq/0.log" Nov 22 11:37:15 crc kubenswrapper[4926]: I1122 11:37:15.429319 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-cell1-server-0_5bd13931-4b28-4235-a779-aea2a515351e/setup-container/0.log" Nov 22 11:37:15 crc kubenswrapper[4926]: I1122 11:37:15.495240 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-0_9bcfa04c-3c9e-47a5-946e-d7c42d3cefda/setup-container/0.log" Nov 22 11:37:15 crc kubenswrapper[4926]: I1122 11:37:15.671944 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-0_9bcfa04c-3c9e-47a5-946e-d7c42d3cefda/rabbitmq/0.log" Nov 22 11:37:15 crc kubenswrapper[4926]: I1122 11:37:15.718806 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-0_9bcfa04c-3c9e-47a5-946e-d7c42d3cefda/setup-container/0.log" Nov 22 11:37:15 crc kubenswrapper[4926]: I1122 11:37:15.774523 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_reboot-os-edpm-deployment-openstack-edpm-ipam-hvcbq_07bbb761-300d-4592-9c67-27e85a79e770/reboot-os-edpm-deployment-openstack-edpm-ipam/0.log" Nov 22 11:37:15 crc kubenswrapper[4926]: I1122 11:37:15.930466 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_redhat-edpm-deployment-openstack-edpm-ipam-k2x5v_5241dfa6-bfdd-495c-8853-135648e0c112/redhat-edpm-deployment-openstack-edpm-ipam/0.log" Nov 22 11:37:15 crc kubenswrapper[4926]: I1122 11:37:15.977676 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_repo-setup-edpm-deployment-openstack-edpm-ipam-8gt7p_9c3831ca-e426-4b08-ad83-050cbedbd547/repo-setup-edpm-deployment-openstack-edpm-ipam/0.log" Nov 22 11:37:16 crc kubenswrapper[4926]: I1122 11:37:16.166744 4926 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack_run-os-edpm-deployment-openstack-edpm-ipam-td6g4_ca7c4fa4-7055-4d7a-9147-ae64dd195ae1/run-os-edpm-deployment-openstack-edpm-ipam/0.log" Nov 22 11:37:16 crc kubenswrapper[4926]: I1122 11:37:16.227716 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ssh-known-hosts-edpm-deployment-fvg2t_474538df-0433-4fb9-b2c2-ed291078d237/ssh-known-hosts-edpm-deployment/0.log" Nov 22 11:37:16 crc kubenswrapper[4926]: I1122 11:37:16.409680 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-proxy-74969bfb89-zx2cm_033cb6e2-4f4b-46e3-a28f-61f904e65d4b/proxy-server/0.log" Nov 22 11:37:16 crc kubenswrapper[4926]: I1122 11:37:16.697797 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-proxy-74969bfb89-zx2cm_033cb6e2-4f4b-46e3-a28f-61f904e65d4b/proxy-httpd/0.log" Nov 22 11:37:16 crc kubenswrapper[4926]: I1122 11:37:16.781602 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-ring-rebalance-bn9s6_06d59088-e96c-45eb-aba8-00382ceaa48a/swift-ring-rebalance/0.log" Nov 22 11:37:16 crc kubenswrapper[4926]: I1122 11:37:16.887323 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_2d0b3ca4-531c-4c3e-9af7-f5d2b65c7251/account-reaper/0.log" Nov 22 11:37:16 crc kubenswrapper[4926]: I1122 11:37:16.894367 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_2d0b3ca4-531c-4c3e-9af7-f5d2b65c7251/account-auditor/0.log" Nov 22 11:37:17 crc kubenswrapper[4926]: I1122 11:37:17.010732 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_2d0b3ca4-531c-4c3e-9af7-f5d2b65c7251/account-server/0.log" Nov 22 11:37:17 crc kubenswrapper[4926]: I1122 11:37:17.026991 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_2d0b3ca4-531c-4c3e-9af7-f5d2b65c7251/account-replicator/0.log" Nov 22 11:37:17 crc kubenswrapper[4926]: I1122 11:37:17.108172 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_2d0b3ca4-531c-4c3e-9af7-f5d2b65c7251/container-auditor/0.log" Nov 22 11:37:17 crc kubenswrapper[4926]: I1122 11:37:17.130854 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_2d0b3ca4-531c-4c3e-9af7-f5d2b65c7251/container-replicator/0.log" Nov 22 11:37:17 crc kubenswrapper[4926]: I1122 11:37:17.233105 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_2d0b3ca4-531c-4c3e-9af7-f5d2b65c7251/container-updater/0.log" Nov 22 11:37:17 crc kubenswrapper[4926]: I1122 11:37:17.268065 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_2d0b3ca4-531c-4c3e-9af7-f5d2b65c7251/container-server/0.log" Nov 22 11:37:17 crc kubenswrapper[4926]: I1122 11:37:17.352620 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_2d0b3ca4-531c-4c3e-9af7-f5d2b65c7251/object-auditor/0.log" Nov 22 11:37:17 crc kubenswrapper[4926]: I1122 11:37:17.359493 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_2d0b3ca4-531c-4c3e-9af7-f5d2b65c7251/object-expirer/0.log" Nov 22 11:37:17 crc kubenswrapper[4926]: I1122 11:37:17.475653 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_2d0b3ca4-531c-4c3e-9af7-f5d2b65c7251/object-replicator/0.log" Nov 22 11:37:17 crc kubenswrapper[4926]: I1122 11:37:17.511968 4926 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack_swift-storage-0_2d0b3ca4-531c-4c3e-9af7-f5d2b65c7251/object-server/0.log" Nov 22 11:37:17 crc kubenswrapper[4926]: I1122 11:37:17.545470 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_2d0b3ca4-531c-4c3e-9af7-f5d2b65c7251/rsync/0.log" Nov 22 11:37:17 crc kubenswrapper[4926]: I1122 11:37:17.547135 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_2d0b3ca4-531c-4c3e-9af7-f5d2b65c7251/object-updater/0.log" Nov 22 11:37:17 crc kubenswrapper[4926]: I1122 11:37:17.677681 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_2d0b3ca4-531c-4c3e-9af7-f5d2b65c7251/swift-recon-cron/0.log" Nov 22 11:37:17 crc kubenswrapper[4926]: I1122 11:37:17.864805 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_telemetry-edpm-deployment-openstack-edpm-ipam-sfz4n_b1c88ed0-29ad-4a8c-8708-0b2a1d5853a4/telemetry-edpm-deployment-openstack-edpm-ipam/0.log" Nov 22 11:37:17 crc kubenswrapper[4926]: I1122 11:37:17.961167 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_tempest-tests-tempest_588c20c1-2673-4c55-9dc4-1e20448b5adb/tempest-tests-tempest-tests-runner/0.log" Nov 22 11:37:18 crc kubenswrapper[4926]: I1122 11:37:18.080915 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_test-operator-logs-pod-tempest-tempest-tests-tempest_2afc1e0b-105d-4e75-b966-4a8bdff5f07f/test-operator-logs-container/0.log" Nov 22 11:37:18 crc kubenswrapper[4926]: I1122 11:37:18.154225 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_validate-network-edpm-deployment-openstack-edpm-ipam-bj2hp_ea024223-c658-4c22-9318-8eb14052b38f/validate-network-edpm-deployment-openstack-edpm-ipam/0.log" Nov 22 11:37:26 crc kubenswrapper[4926]: I1122 11:37:26.009375 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_memcached-0_6f000ebf-57ae-4f00-9aaf-7583a9ec4abb/memcached/0.log" Nov 22 11:37:39 crc kubenswrapper[4926]: I1122 11:37:39.661076 4926 patch_prober.go:28] interesting pod/machine-config-daemon-xr9nd container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 22 11:37:39 crc kubenswrapper[4926]: I1122 11:37:39.661487 4926 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" podUID="d4977b14-85c3-4141-9b15-1768f09e8d27" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 22 11:37:41 crc kubenswrapper[4926]: I1122 11:37:41.053738 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_236e4f7cddf341c99d6684fdc49c2b84b27b70fb0d54817b1a8864f51fld44v_0379bb7b-1539-4a2f-888d-fc7bd9828a33/util/0.log" Nov 22 11:37:41 crc kubenswrapper[4926]: I1122 11:37:41.187592 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_236e4f7cddf341c99d6684fdc49c2b84b27b70fb0d54817b1a8864f51fld44v_0379bb7b-1539-4a2f-888d-fc7bd9828a33/util/0.log" Nov 22 11:37:41 crc kubenswrapper[4926]: I1122 11:37:41.245091 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_236e4f7cddf341c99d6684fdc49c2b84b27b70fb0d54817b1a8864f51fld44v_0379bb7b-1539-4a2f-888d-fc7bd9828a33/pull/0.log" Nov 22 11:37:41 
crc kubenswrapper[4926]: I1122 11:37:41.245309 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_236e4f7cddf341c99d6684fdc49c2b84b27b70fb0d54817b1a8864f51fld44v_0379bb7b-1539-4a2f-888d-fc7bd9828a33/pull/0.log"
Nov 22 11:37:41 crc kubenswrapper[4926]: I1122 11:37:41.402350 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_236e4f7cddf341c99d6684fdc49c2b84b27b70fb0d54817b1a8864f51fld44v_0379bb7b-1539-4a2f-888d-fc7bd9828a33/pull/0.log"
Nov 22 11:37:41 crc kubenswrapper[4926]: I1122 11:37:41.414916 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_236e4f7cddf341c99d6684fdc49c2b84b27b70fb0d54817b1a8864f51fld44v_0379bb7b-1539-4a2f-888d-fc7bd9828a33/util/0.log"
Nov 22 11:37:41 crc kubenswrapper[4926]: I1122 11:37:41.468961 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_236e4f7cddf341c99d6684fdc49c2b84b27b70fb0d54817b1a8864f51fld44v_0379bb7b-1539-4a2f-888d-fc7bd9828a33/extract/0.log"
Nov 22 11:37:41 crc kubenswrapper[4926]: I1122 11:37:41.610954 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_barbican-operator-controller-manager-697c78f669-dfq9w_f8406cda-67f4-425a-83f1-ab90cf4ebf0c/kube-rbac-proxy/0.log"
Nov 22 11:37:41 crc kubenswrapper[4926]: I1122 11:37:41.655002 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_barbican-operator-controller-manager-697c78f669-dfq9w_f8406cda-67f4-425a-83f1-ab90cf4ebf0c/manager/0.log"
Nov 22 11:37:41 crc kubenswrapper[4926]: I1122 11:37:41.684351 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_cinder-operator-controller-manager-6498cbf48f-cttxc_644aaf3f-48c2-4789-9775-18ed3ae24fd7/kube-rbac-proxy/0.log"
Nov 22 11:37:41 crc kubenswrapper[4926]: I1122 11:37:41.842702 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_designate-operator-controller-manager-767ccfd65f-8p4kn_02d4d3c4-4951-4f41-8605-239ac95dae92/kube-rbac-proxy/0.log"
Nov 22 11:37:41 crc kubenswrapper[4926]: I1122 11:37:41.860277 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_cinder-operator-controller-manager-6498cbf48f-cttxc_644aaf3f-48c2-4789-9775-18ed3ae24fd7/manager/0.log"
Nov 22 11:37:41 crc kubenswrapper[4926]: I1122 11:37:41.882478 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_designate-operator-controller-manager-767ccfd65f-8p4kn_02d4d3c4-4951-4f41-8605-239ac95dae92/manager/0.log"
Nov 22 11:37:41 crc kubenswrapper[4926]: I1122 11:37:41.989008 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_glance-operator-controller-manager-7969689c84-qshq6_3ebbbdf8-da82-4f02-a8f5-509de3b56721/kube-rbac-proxy/0.log"
Nov 22 11:37:42 crc kubenswrapper[4926]: I1122 11:37:42.101247 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_glance-operator-controller-manager-7969689c84-qshq6_3ebbbdf8-da82-4f02-a8f5-509de3b56721/manager/0.log"
Nov 22 11:37:42 crc kubenswrapper[4926]: I1122 11:37:42.176047 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_heat-operator-controller-manager-7869d7c46b-np8cn_bf6721b8-a1f6-4d27-ad5a-c090e2dc8806/manager/0.log"
Nov 22 11:37:42 crc kubenswrapper[4926]: I1122 11:37:42.191228 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_heat-operator-controller-manager-7869d7c46b-np8cn_bf6721b8-a1f6-4d27-ad5a-c090e2dc8806/kube-rbac-proxy/0.log"
Nov 22 11:37:42 crc kubenswrapper[4926]: I1122 11:37:42.271458 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_horizon-operator-controller-manager-598f69df5d-hls4w_f3502c04-7310-4659-aa47-b91b71ff3b30/kube-rbac-proxy/0.log"
Nov 22 11:37:42 crc kubenswrapper[4926]: I1122 11:37:42.369636 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_horizon-operator-controller-manager-598f69df5d-hls4w_f3502c04-7310-4659-aa47-b91b71ff3b30/manager/0.log"
Nov 22 11:37:42 crc kubenswrapper[4926]: I1122 11:37:42.431708 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_infra-operator-controller-manager-7875d8bb94-pr7tn_dc80ed79-7a34-4756-b5ed-0b3cda532910/kube-rbac-proxy/0.log"
Nov 22 11:37:42 crc kubenswrapper[4926]: I1122 11:37:42.576298 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_infra-operator-controller-manager-7875d8bb94-pr7tn_dc80ed79-7a34-4756-b5ed-0b3cda532910/manager/0.log"
Nov 22 11:37:42 crc kubenswrapper[4926]: I1122 11:37:42.598984 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ironic-operator-controller-manager-99b499f4-7j69z_46528db3-6717-4abb-a779-33290ae0c986/kube-rbac-proxy/0.log"
Nov 22 11:37:42 crc kubenswrapper[4926]: I1122 11:37:42.625667 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ironic-operator-controller-manager-99b499f4-7j69z_46528db3-6717-4abb-a779-33290ae0c986/manager/0.log"
Nov 22 11:37:42 crc kubenswrapper[4926]: I1122 11:37:42.770479 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_keystone-operator-controller-manager-7454b96578-dvdzj_7e28261c-db91-4143-a418-1114acf60dc0/kube-rbac-proxy/0.log"
Nov 22 11:37:42 crc kubenswrapper[4926]: I1122 11:37:42.830100 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_keystone-operator-controller-manager-7454b96578-dvdzj_7e28261c-db91-4143-a418-1114acf60dc0/manager/0.log"
Nov 22 11:37:43 crc kubenswrapper[4926]: I1122 11:37:43.005599 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_manila-operator-controller-manager-58f887965d-r2ctj_0996e99c-8565-426e-afa0-8a52ff2bee16/kube-rbac-proxy/0.log"
Nov 22 11:37:43 crc kubenswrapper[4926]: I1122 11:37:43.009953 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_manila-operator-controller-manager-58f887965d-r2ctj_0996e99c-8565-426e-afa0-8a52ff2bee16/manager/0.log"
Nov 22 11:37:43 crc kubenswrapper[4926]: I1122 11:37:43.030711 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_mariadb-operator-controller-manager-5f449d8fbc-bfqxw_72b66cb9-cb2a-4977-a3f1-3fe22508641e/kube-rbac-proxy/0.log"
Nov 22 11:37:43 crc kubenswrapper[4926]: I1122 11:37:43.202519 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_mariadb-operator-controller-manager-5f449d8fbc-bfqxw_72b66cb9-cb2a-4977-a3f1-3fe22508641e/manager/0.log"
Nov 22 11:37:43 crc kubenswrapper[4926]: I1122 11:37:43.206544 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_neutron-operator-controller-manager-669dc6ff5f-crkkv_0126a31b-68bb-46a7-8f3a-f34ad5d74e6d/kube-rbac-proxy/0.log"
Nov 22 11:37:43 crc kubenswrapper[4926]: I1122 11:37:43.274004 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_neutron-operator-controller-manager-669dc6ff5f-crkkv_0126a31b-68bb-46a7-8f3a-f34ad5d74e6d/manager/0.log"
Nov 22 11:37:43 crc kubenswrapper[4926]: I1122 11:37:43.372573 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_nova-operator-controller-manager-cfbb9c588-zjkmb_8b039ede-62fc-47ed-83ed-672e756887a1/kube-rbac-proxy/0.log"
Nov 22 11:37:43 crc kubenswrapper[4926]: I1122 11:37:43.468069 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_nova-operator-controller-manager-cfbb9c588-zjkmb_8b039ede-62fc-47ed-83ed-672e756887a1/manager/0.log"
Nov 22 11:37:43 crc kubenswrapper[4926]: I1122 11:37:43.584524 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_octavia-operator-controller-manager-54cfbf4c7d-c5z8p_71c1201e-62bb-4d32-945b-80cda1ff41ac/kube-rbac-proxy/0.log"
Nov 22 11:37:43 crc kubenswrapper[4926]: I1122 11:37:43.584811 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_octavia-operator-controller-manager-54cfbf4c7d-c5z8p_71c1201e-62bb-4d32-945b-80cda1ff41ac/manager/0.log"
Nov 22 11:37:43 crc kubenswrapper[4926]: I1122 11:37:43.697998 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-baremetal-operator-controller-manager-7cfbb8b596qtsss_4c6f9a58-d6f5-426f-bb8d-e019401a015a/kube-rbac-proxy/0.log"
Nov 22 11:37:43 crc kubenswrapper[4926]: I1122 11:37:43.777133 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-baremetal-operator-controller-manager-7cfbb8b596qtsss_4c6f9a58-d6f5-426f-bb8d-e019401a015a/manager/0.log"
Nov 22 11:37:43 crc kubenswrapper[4926]: I1122 11:37:43.873467 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-manager-64844fbb8-hngj4_d292d5fa-12ea-40d0-a6df-1f6e9f5c8059/kube-rbac-proxy/0.log"
Nov 22 11:37:43 crc kubenswrapper[4926]: I1122 11:37:43.973915 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-operator-76ffdb7f4-g9zx8_391d1daa-3379-45e6-be55-fb2c3e1d304a/kube-rbac-proxy/0.log"
Nov 22 11:37:44 crc kubenswrapper[4926]: I1122 11:37:44.188502 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-index-56rwr_cc73291c-a3b1-4641-95a2-454130fe25f5/registry-server/0.log"
Nov 22 11:37:44 crc kubenswrapper[4926]: I1122 11:37:44.264292 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-operator-76ffdb7f4-g9zx8_391d1daa-3379-45e6-be55-fb2c3e1d304a/operator/0.log"
Nov 22 11:37:44 crc kubenswrapper[4926]: I1122 11:37:44.390290 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ovn-operator-controller-manager-587df66445-2hwd8_355d4b1d-9137-4cf5-aac8-e373d1b7d696/kube-rbac-proxy/0.log"
Nov 22 11:37:44 crc kubenswrapper[4926]: I1122 11:37:44.572039 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ovn-operator-controller-manager-587df66445-2hwd8_355d4b1d-9137-4cf5-aac8-e373d1b7d696/manager/0.log"
Nov 22 11:37:44 crc kubenswrapper[4926]: I1122 11:37:44.672819 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_placement-operator-controller-manager-5b797b8dff-pgzg9_ec00fa84-7dd0-46d6-b9f2-4a7b687b347b/kube-rbac-proxy/0.log"
Nov 22 11:37:44 crc kubenswrapper[4926]: I1122 11:37:44.788779 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_placement-operator-controller-manager-5b797b8dff-pgzg9_ec00fa84-7dd0-46d6-b9f2-4a7b687b347b/manager/0.log"
Nov 22 11:37:44 crc kubenswrapper[4926]: I1122 11:37:44.854697 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_rabbitmq-cluster-operator-manager-5f97d8c699-jdqsq_3ab27f1b-e328-46d1-b9e5-b29e2caedef6/operator/0.log"
Nov 22 11:37:45 crc kubenswrapper[4926]: I1122 11:37:45.001469 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_swift-operator-controller-manager-d656998f4-j7xg8_e30ebbd3-daab-4ee4-acea-631c15b5045b/manager/0.log"
Nov 22 11:37:45 crc kubenswrapper[4926]: I1122 11:37:45.044625 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-manager-64844fbb8-hngj4_d292d5fa-12ea-40d0-a6df-1f6e9f5c8059/manager/0.log"
Nov 22 11:37:45 crc kubenswrapper[4926]: I1122 11:37:45.072113 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_swift-operator-controller-manager-d656998f4-j7xg8_e30ebbd3-daab-4ee4-acea-631c15b5045b/kube-rbac-proxy/0.log"
Nov 22 11:37:45 crc kubenswrapper[4926]: I1122 11:37:45.110648 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_telemetry-operator-controller-manager-6d4bf84b58-95jv5_6c866ac0-e106-4a90-a223-435b244634b5/kube-rbac-proxy/0.log"
Nov 22 11:37:45 crc kubenswrapper[4926]: I1122 11:37:45.229362 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_telemetry-operator-controller-manager-6d4bf84b58-95jv5_6c866ac0-e106-4a90-a223-435b244634b5/manager/0.log"
Nov 22 11:37:45 crc kubenswrapper[4926]: I1122 11:37:45.263034 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_test-operator-controller-manager-6f44bf845f-7vhg5_dab1442d-6ad4-4d03-b520-a12d7a4d6c9d/kube-rbac-proxy/0.log"
Nov 22 11:37:45 crc kubenswrapper[4926]: I1122 11:37:45.289642 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_test-operator-controller-manager-6f44bf845f-7vhg5_dab1442d-6ad4-4d03-b520-a12d7a4d6c9d/manager/0.log"
Nov 22 11:37:45 crc kubenswrapper[4926]: I1122 11:37:45.409640 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_watcher-operator-controller-manager-85494d54fc-czf4h_3947549a-e067-4135-ba36-1e2663db15c0/kube-rbac-proxy/0.log"
Nov 22 11:37:45 crc kubenswrapper[4926]: I1122 11:37:45.480905 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_watcher-operator-controller-manager-85494d54fc-czf4h_3947549a-e067-4135-ba36-1e2663db15c0/manager/0.log"
Nov 22 11:38:01 crc kubenswrapper[4926]: I1122 11:38:01.310575 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_control-plane-machine-set-operator-78cbb6b69f-rkbd7_0fd98b4c-0217-4784-8bbd-b0ec0680a611/control-plane-machine-set-operator/0.log"
Nov 22 11:38:01 crc kubenswrapper[4926]: I1122 11:38:01.523039 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_machine-api-operator-5694c8668f-jkqsx_e0882887-a6d9-4aac-a7d7-c14b934298e2/kube-rbac-proxy/0.log"
Nov 22 11:38:01 crc kubenswrapper[4926]: I1122 11:38:01.554280 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_machine-api-operator-5694c8668f-jkqsx_e0882887-a6d9-4aac-a7d7-c14b934298e2/machine-api-operator/0.log"
Nov 22 11:38:09 crc kubenswrapper[4926]: I1122 11:38:09.661421 4926 patch_prober.go:28] interesting pod/machine-config-daemon-xr9nd container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 22 11:38:09 crc kubenswrapper[4926]: I1122 11:38:09.662001 4926 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" podUID="d4977b14-85c3-4141-9b15-1768f09e8d27" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 22 11:38:13 crc kubenswrapper[4926]: I1122 11:38:13.875720 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-5b446d88c5-xpqf5_473a1f27-e3c6-4c74-9daf-da6ae42cc754/cert-manager-controller/0.log"
Nov 22 11:38:14 crc kubenswrapper[4926]: I1122 11:38:14.077374 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-cainjector-7f985d654d-zzrcx_46c8cced-eb2e-409c-9923-f28c5924e5b1/cert-manager-cainjector/0.log"
Nov 22 11:38:14 crc kubenswrapper[4926]: I1122 11:38:14.105005 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-webhook-5655c58dd6-w4h2r_ffec6625-ab3c-4e67-af68-afdbe4210730/cert-manager-webhook/0.log"
Nov 22 11:38:26 crc kubenswrapper[4926]: I1122 11:38:26.825557 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-console-plugin-5874bd7bc5-slspm_1d24ff8c-3a27-452a-a473-90e139c30740/nmstate-console-plugin/0.log"
Nov 22 11:38:27 crc kubenswrapper[4926]: I1122 11:38:27.025267 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-metrics-5dcf9c57c5-vfdhw_1b73e906-db9e-454d-8316-2266a666d683/kube-rbac-proxy/0.log"
Nov 22 11:38:27 crc kubenswrapper[4926]: I1122 11:38:27.032525 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-metrics-5dcf9c57c5-vfdhw_1b73e906-db9e-454d-8316-2266a666d683/nmstate-metrics/0.log"
Nov 22 11:38:27 crc kubenswrapper[4926]: I1122 11:38:27.037987 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-handler-lt5kb_cae34914-f89d-4a66-bb66-901024424e79/nmstate-handler/0.log"
Nov 22 11:38:27 crc kubenswrapper[4926]: I1122 11:38:27.213678 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-webhook-6b89b748d8-zttzw_c24318e9-ff38-4221-8931-046cb1c39368/nmstate-webhook/0.log"
Nov 22 11:38:27 crc kubenswrapper[4926]: I1122 11:38:27.281776 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-operator-557fdffb88-2m244_251bb93a-68e8-4e17-98ac-0dc9c7f31ace/nmstate-operator/0.log"
Nov 22 11:38:39 crc kubenswrapper[4926]: I1122 11:38:39.660755 4926 patch_prober.go:28] interesting pod/machine-config-daemon-xr9nd container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 22 11:38:39 crc kubenswrapper[4926]: I1122 11:38:39.661324 4926 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" podUID="d4977b14-85c3-4141-9b15-1768f09e8d27" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 22 11:38:39 crc kubenswrapper[4926]: I1122 11:38:39.661376 4926 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-xr9nd"
Nov 22 11:38:39 crc kubenswrapper[4926]: I1122 11:38:39.662290 4926 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"9d6e4aef945d9e08737522f4731c0e4f60636bb537365c94ec1ad314c9a6e50e"} pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Nov 22 11:38:39 crc kubenswrapper[4926]: I1122 11:38:39.662377 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" podUID="d4977b14-85c3-4141-9b15-1768f09e8d27" containerName="machine-config-daemon" containerID="cri-o://9d6e4aef945d9e08737522f4731c0e4f60636bb537365c94ec1ad314c9a6e50e" gracePeriod=600
Nov 22 11:38:40 crc kubenswrapper[4926]: I1122 11:38:40.726910 4926 generic.go:334] "Generic (PLEG): container finished" podID="d4977b14-85c3-4141-9b15-1768f09e8d27" containerID="9d6e4aef945d9e08737522f4731c0e4f60636bb537365c94ec1ad314c9a6e50e" exitCode=0
Nov 22 11:38:40 crc kubenswrapper[4926]: I1122 11:38:40.726977 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" event={"ID":"d4977b14-85c3-4141-9b15-1768f09e8d27","Type":"ContainerDied","Data":"9d6e4aef945d9e08737522f4731c0e4f60636bb537365c94ec1ad314c9a6e50e"}
Nov 22 11:38:40 crc kubenswrapper[4926]: I1122 11:38:40.727595 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" event={"ID":"d4977b14-85c3-4141-9b15-1768f09e8d27","Type":"ContainerStarted","Data":"23214392718d6cb9dbc33ab7d4275deb5093396ce41b71fae35e5d84eda8c403"}
Nov 22 11:38:40 crc kubenswrapper[4926]: I1122 11:38:40.727624 4926 scope.go:117] "RemoveContainer" containerID="a2b8252229cd7206a620d1ff730f422361fddf9e56f7c2a21e9e2dbcd158d5c6"
Nov 22 11:38:41 crc kubenswrapper[4926]: I1122 11:38:41.784399 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_controller-6c7b4b5f48-pv5tc_1d4b05be-2133-4a40-a7e8-7e4b49f4c0bc/kube-rbac-proxy/0.log"
Nov 22 11:38:41 crc kubenswrapper[4926]: I1122 11:38:41.888304 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_controller-6c7b4b5f48-pv5tc_1d4b05be-2133-4a40-a7e8-7e4b49f4c0bc/controller/0.log"
Nov 22 11:38:41 crc kubenswrapper[4926]: I1122 11:38:41.973835 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-pb2r5_493e392b-c61f-4115-abdf-42a9c2febe81/cp-frr-files/0.log"
Nov 22 11:38:42 crc kubenswrapper[4926]: I1122 11:38:42.111802 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-pb2r5_493e392b-c61f-4115-abdf-42a9c2febe81/cp-frr-files/0.log"
Nov 22 11:38:42 crc kubenswrapper[4926]: I1122 11:38:42.136336 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-pb2r5_493e392b-c61f-4115-abdf-42a9c2febe81/cp-reloader/0.log"
Nov 22 11:38:42 crc kubenswrapper[4926]: I1122 11:38:42.158811 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-pb2r5_493e392b-c61f-4115-abdf-42a9c2febe81/cp-metrics/0.log"
Nov 22 11:38:42 crc kubenswrapper[4926]: I1122 11:38:42.203409 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-pb2r5_493e392b-c61f-4115-abdf-42a9c2febe81/cp-reloader/0.log"
Nov 22 11:38:42 crc kubenswrapper[4926]: I1122 11:38:42.359752 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-pb2r5_493e392b-c61f-4115-abdf-42a9c2febe81/cp-frr-files/0.log"
Nov 22 11:38:42 crc kubenswrapper[4926]: I1122 11:38:42.386365 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-pb2r5_493e392b-c61f-4115-abdf-42a9c2febe81/cp-metrics/0.log"
Nov 22 11:38:42 crc kubenswrapper[4926]: I1122 11:38:42.393012 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-pb2r5_493e392b-c61f-4115-abdf-42a9c2febe81/cp-reloader/0.log"
Nov 22 11:38:42 crc kubenswrapper[4926]: I1122 11:38:42.419944 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-pb2r5_493e392b-c61f-4115-abdf-42a9c2febe81/cp-metrics/0.log"
Nov 22 11:38:42 crc kubenswrapper[4926]: I1122 11:38:42.604950 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-pb2r5_493e392b-c61f-4115-abdf-42a9c2febe81/cp-metrics/0.log"
Nov 22 11:38:42 crc kubenswrapper[4926]: I1122 11:38:42.631195 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-pb2r5_493e392b-c61f-4115-abdf-42a9c2febe81/cp-frr-files/0.log"
Nov 22 11:38:42 crc kubenswrapper[4926]: I1122 11:38:42.634801 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-pb2r5_493e392b-c61f-4115-abdf-42a9c2febe81/cp-reloader/0.log"
Nov 22 11:38:42 crc kubenswrapper[4926]: I1122 11:38:42.640141 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-pb2r5_493e392b-c61f-4115-abdf-42a9c2febe81/controller/0.log"
Nov 22 11:38:42 crc kubenswrapper[4926]: I1122 11:38:42.794517 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-pb2r5_493e392b-c61f-4115-abdf-42a9c2febe81/frr-metrics/0.log"
Nov 22 11:38:42 crc kubenswrapper[4926]: I1122 11:38:42.842180 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-pb2r5_493e392b-c61f-4115-abdf-42a9c2febe81/kube-rbac-proxy-frr/0.log"
Nov 22 11:38:42 crc kubenswrapper[4926]: I1122 11:38:42.842373 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-pb2r5_493e392b-c61f-4115-abdf-42a9c2febe81/kube-rbac-proxy/0.log"
Nov 22 11:38:43 crc kubenswrapper[4926]: I1122 11:38:43.004670 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-pb2r5_493e392b-c61f-4115-abdf-42a9c2febe81/reloader/0.log"
Nov 22 11:38:43 crc kubenswrapper[4926]: I1122 11:38:43.052334 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-webhook-server-6998585d5-ndwkc_e8452d37-6eed-427b-9741-bda6aea54331/frr-k8s-webhook-server/0.log"
Nov 22 11:38:43 crc kubenswrapper[4926]: I1122 11:38:43.265764 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_metallb-operator-controller-manager-7d858964b4-hd89m_afb6b154-40e5-4285-9f49-38053bdbb6c4/manager/0.log"
Nov 22 11:38:43 crc kubenswrapper[4926]: I1122 11:38:43.410788 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_metallb-operator-webhook-server-676845568d-nb86k_63e553c4-290f-4b65-a563-b57f0577c982/webhook-server/0.log"
Nov 22 11:38:43 crc kubenswrapper[4926]: I1122 11:38:43.551336 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_speaker-xg4ns_1302de5c-2784-4974-b2ac-3572fc73e1d9/kube-rbac-proxy/0.log"
Nov 22 11:38:44 crc kubenswrapper[4926]: I1122 11:38:44.120558 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_speaker-xg4ns_1302de5c-2784-4974-b2ac-3572fc73e1d9/speaker/0.log"
Nov 22 11:38:44 crc kubenswrapper[4926]: I1122 11:38:44.152434 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-pb2r5_493e392b-c61f-4115-abdf-42a9c2febe81/frr/0.log"
Nov 22 11:38:56 crc kubenswrapper[4926]: I1122 11:38:56.710968 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772enqqjf_a5213e13-fc34-444f-91d3-df6d09816a68/util/0.log"
Nov 22 11:38:56 crc kubenswrapper[4926]: I1122 11:38:56.806859 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772enqqjf_a5213e13-fc34-444f-91d3-df6d09816a68/util/0.log"
Nov 22 11:38:56 crc kubenswrapper[4926]: I1122 11:38:56.839336 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772enqqjf_a5213e13-fc34-444f-91d3-df6d09816a68/pull/0.log"
Nov 22 11:38:56 crc kubenswrapper[4926]: I1122 11:38:56.870792 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772enqqjf_a5213e13-fc34-444f-91d3-df6d09816a68/pull/0.log"
Nov 22 11:38:57 crc kubenswrapper[4926]: I1122 11:38:57.036461 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772enqqjf_a5213e13-fc34-444f-91d3-df6d09816a68/util/0.log"
Nov 22 11:38:57 crc kubenswrapper[4926]: I1122 11:38:57.040090 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772enqqjf_a5213e13-fc34-444f-91d3-df6d09816a68/pull/0.log"
Nov 22 11:38:57 crc kubenswrapper[4926]: I1122 11:38:57.048365 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772enqqjf_a5213e13-fc34-444f-91d3-df6d09816a68/extract/0.log"
Nov 22 11:38:57 crc kubenswrapper[4926]: I1122 11:38:57.194319 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-wlbxq_800e7d7e-7580-482b-bf81-728557492bcd/extract-utilities/0.log"
Nov 22 11:38:57 crc kubenswrapper[4926]: I1122 11:38:57.371577 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-wlbxq_800e7d7e-7580-482b-bf81-728557492bcd/extract-utilities/0.log"
Nov 22 11:38:57 crc kubenswrapper[4926]: I1122 11:38:57.413362 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-wlbxq_800e7d7e-7580-482b-bf81-728557492bcd/extract-content/0.log"
Nov 22 11:38:57 crc kubenswrapper[4926]: I1122 11:38:57.414385 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-wlbxq_800e7d7e-7580-482b-bf81-728557492bcd/extract-content/0.log"
Nov 22 11:38:57 crc kubenswrapper[4926]: I1122 11:38:57.564110 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-wlbxq_800e7d7e-7580-482b-bf81-728557492bcd/extract-content/0.log"
Nov 22 11:38:57 crc kubenswrapper[4926]: I1122 11:38:57.578597 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-wlbxq_800e7d7e-7580-482b-bf81-728557492bcd/extract-utilities/0.log"
Nov 22 11:38:57 crc kubenswrapper[4926]: I1122 11:38:57.825799 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-bpmk4_09054c3e-6dab-449c-a04e-1ba78e281575/extract-utilities/0.log"
Nov 22 11:38:58 crc kubenswrapper[4926]: I1122 11:38:58.008546 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-bpmk4_09054c3e-6dab-449c-a04e-1ba78e281575/extract-content/0.log"
Nov 22 11:38:58 crc kubenswrapper[4926]: I1122 11:38:58.050175 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-bpmk4_09054c3e-6dab-449c-a04e-1ba78e281575/extract-utilities/0.log"
Nov 22 11:38:58 crc kubenswrapper[4926]: I1122 11:38:58.082023 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-bpmk4_09054c3e-6dab-449c-a04e-1ba78e281575/extract-content/0.log"
Nov 22 11:38:58 crc kubenswrapper[4926]: I1122 11:38:58.099164 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-wlbxq_800e7d7e-7580-482b-bf81-728557492bcd/registry-server/0.log"
Nov 22 11:38:58 crc kubenswrapper[4926]: I1122 11:38:58.234373 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-bpmk4_09054c3e-6dab-449c-a04e-1ba78e281575/extract-content/0.log"
Nov 22 11:38:58 crc kubenswrapper[4926]: I1122 11:38:58.313283 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-bpmk4_09054c3e-6dab-449c-a04e-1ba78e281575/extract-utilities/0.log"
Nov 22 11:38:58 crc kubenswrapper[4926]: I1122 11:38:58.476487 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6lv2rq_de687d1f-94d7-4503-9599-7a43bff94909/util/0.log"
Nov 22 11:38:58 crc kubenswrapper[4926]: I1122 11:38:58.618604 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-bpmk4_09054c3e-6dab-449c-a04e-1ba78e281575/registry-server/0.log"
Nov 22 11:38:58 crc kubenswrapper[4926]: I1122 11:38:58.618771 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6lv2rq_de687d1f-94d7-4503-9599-7a43bff94909/pull/0.log"
Nov 22 11:38:58 crc kubenswrapper[4926]: I1122 11:38:58.652035 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6lv2rq_de687d1f-94d7-4503-9599-7a43bff94909/util/0.log"
Nov 22 11:38:58 crc kubenswrapper[4926]: I1122 11:38:58.686450 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6lv2rq_de687d1f-94d7-4503-9599-7a43bff94909/pull/0.log"
Nov 22 11:38:58 crc kubenswrapper[4926]: I1122 11:38:58.831990 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6lv2rq_de687d1f-94d7-4503-9599-7a43bff94909/pull/0.log"
Nov 22 11:38:58 crc kubenswrapper[4926]: I1122 11:38:58.882099 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6lv2rq_de687d1f-94d7-4503-9599-7a43bff94909/util/0.log"
Nov 22 11:38:58 crc kubenswrapper[4926]: I1122 11:38:58.885205 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6lv2rq_de687d1f-94d7-4503-9599-7a43bff94909/extract/0.log"
Nov 22 11:38:58 crc kubenswrapper[4926]: I1122 11:38:58.993521 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_marketplace-operator-79b997595-b7gxf_b71cda98-e97f-4b9c-93d9-74c8cabe6420/marketplace-operator/0.log"
Nov 22 11:38:59 crc kubenswrapper[4926]: I1122 11:38:59.049608 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-szcbp_1d1eac02-20b5-4cfc-add3-a5f9d687455b/extract-utilities/0.log"
Nov 22 11:38:59 crc kubenswrapper[4926]: I1122 11:38:59.256925 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-szcbp_1d1eac02-20b5-4cfc-add3-a5f9d687455b/extract-utilities/0.log"
Nov 22 11:38:59 crc kubenswrapper[4926]: I1122 11:38:59.264162 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-szcbp_1d1eac02-20b5-4cfc-add3-a5f9d687455b/extract-content/0.log"
Nov 22 11:38:59 crc kubenswrapper[4926]: I1122 11:38:59.270039 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-szcbp_1d1eac02-20b5-4cfc-add3-a5f9d687455b/extract-content/0.log"
Nov 22 11:38:59 crc kubenswrapper[4926]: I1122 11:38:59.415216 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-szcbp_1d1eac02-20b5-4cfc-add3-a5f9d687455b/extract-utilities/0.log"
Nov 22 11:38:59 crc kubenswrapper[4926]: I1122 11:38:59.421664 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-szcbp_1d1eac02-20b5-4cfc-add3-a5f9d687455b/extract-content/0.log"
Nov 22 11:38:59 crc kubenswrapper[4926]: I1122 11:38:59.500878 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-szcbp_1d1eac02-20b5-4cfc-add3-a5f9d687455b/registry-server/0.log"
Nov 22 11:38:59 crc kubenswrapper[4926]: I1122 11:38:59.575354 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-v4wgk_17936bf0-470e-4bc3-aa1d-28727f066d93/extract-utilities/0.log"
Nov 22 11:38:59 crc kubenswrapper[4926]: I1122 11:38:59.749761 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-v4wgk_17936bf0-470e-4bc3-aa1d-28727f066d93/extract-utilities/0.log"
Nov 22 11:38:59 crc kubenswrapper[4926]: I1122 11:38:59.762368 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-v4wgk_17936bf0-470e-4bc3-aa1d-28727f066d93/extract-content/0.log"
Nov 22 11:38:59 crc kubenswrapper[4926]: I1122 11:38:59.763098 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-v4wgk_17936bf0-470e-4bc3-aa1d-28727f066d93/extract-content/0.log"
Nov 22 11:38:59 crc kubenswrapper[4926]: I1122 11:38:59.926866 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-v4wgk_17936bf0-470e-4bc3-aa1d-28727f066d93/extract-utilities/0.log"
Nov 22 11:38:59 crc kubenswrapper[4926]: I1122 11:38:59.974619 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-v4wgk_17936bf0-470e-4bc3-aa1d-28727f066d93/extract-content/0.log"
Nov 22 11:39:00 crc kubenswrapper[4926]: I1122 11:39:00.412512 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-v4wgk_17936bf0-470e-4bc3-aa1d-28727f066d93/registry-server/0.log"
Nov 22 11:39:31 crc kubenswrapper[4926]: E1122 11:39:31.154305 4926 upgradeaware.go:427] Error proxying data from client to backend: readfrom tcp 38.102.83.248:33766->38.102.83.248:35555: write tcp 38.102.83.248:33766->38.102.83.248:35555: write: broken pipe
Nov 22 11:40:35 crc kubenswrapper[4926]: I1122 11:40:35.966182 4926 generic.go:334] "Generic (PLEG): container finished" podID="cf33381f-80dd-4c3b-988f-982a1b420fb1" containerID="2a8fe4ed84722d5ad60ff453a695f2b44f4f924ebd2473a49dee989aad9ab1f7" exitCode=0
Nov 22 11:40:35 crc kubenswrapper[4926]: I1122 11:40:35.966241 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-zcnnb/must-gather-qm8bc" event={"ID":"cf33381f-80dd-4c3b-988f-982a1b420fb1","Type":"ContainerDied","Data":"2a8fe4ed84722d5ad60ff453a695f2b44f4f924ebd2473a49dee989aad9ab1f7"}
Nov 22 11:40:35 crc kubenswrapper[4926]: I1122 11:40:35.967526 4926 scope.go:117] "RemoveContainer" containerID="2a8fe4ed84722d5ad60ff453a695f2b44f4f924ebd2473a49dee989aad9ab1f7"
Nov 22 11:40:36 crc kubenswrapper[4926]: I1122 11:40:36.616110 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-zcnnb_must-gather-qm8bc_cf33381f-80dd-4c3b-988f-982a1b420fb1/gather/0.log"
Nov 22 11:40:44 crc kubenswrapper[4926]: I1122 11:40:44.005085 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-zcnnb/must-gather-qm8bc"]
Nov 22 11:40:44 crc kubenswrapper[4926]: I1122 11:40:44.006138 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-must-gather-zcnnb/must-gather-qm8bc" podUID="cf33381f-80dd-4c3b-988f-982a1b420fb1" containerName="copy" containerID="cri-o://35c58ac64c17bbc1c4353719738d0a05f4f00adeacd99faa9552ca67a72ed097" gracePeriod=2
Nov 22 11:40:44 crc kubenswrapper[4926]: I1122 11:40:44.017337 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-zcnnb/must-gather-qm8bc"]
Nov 22 11:40:44 crc kubenswrapper[4926]: I1122 11:40:44.421569 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-zcnnb_must-gather-qm8bc_cf33381f-80dd-4c3b-988f-982a1b420fb1/copy/0.log"
Nov 22 11:40:44 crc kubenswrapper[4926]: I1122 11:40:44.422189 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-zcnnb/must-gather-qm8bc"
Nov 22 11:40:44 crc kubenswrapper[4926]: I1122 11:40:44.535391 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jlkl8\" (UniqueName: \"kubernetes.io/projected/cf33381f-80dd-4c3b-988f-982a1b420fb1-kube-api-access-jlkl8\") pod \"cf33381f-80dd-4c3b-988f-982a1b420fb1\" (UID: \"cf33381f-80dd-4c3b-988f-982a1b420fb1\") "
Nov 22 11:40:44 crc kubenswrapper[4926]: I1122 11:40:44.535554 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/cf33381f-80dd-4c3b-988f-982a1b420fb1-must-gather-output\") pod \"cf33381f-80dd-4c3b-988f-982a1b420fb1\" (UID: \"cf33381f-80dd-4c3b-988f-982a1b420fb1\") "
Nov 22 11:40:44 crc kubenswrapper[4926]: I1122 11:40:44.541491 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cf33381f-80dd-4c3b-988f-982a1b420fb1-kube-api-access-jlkl8" (OuterVolumeSpecName: "kube-api-access-jlkl8") pod "cf33381f-80dd-4c3b-988f-982a1b420fb1" (UID: "cf33381f-80dd-4c3b-988f-982a1b420fb1"). InnerVolumeSpecName "kube-api-access-jlkl8". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 22 11:40:44 crc kubenswrapper[4926]: I1122 11:40:44.637556 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jlkl8\" (UniqueName: \"kubernetes.io/projected/cf33381f-80dd-4c3b-988f-982a1b420fb1-kube-api-access-jlkl8\") on node \"crc\" DevicePath \"\""
Nov 22 11:40:44 crc kubenswrapper[4926]: I1122 11:40:44.666233 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/cf33381f-80dd-4c3b-988f-982a1b420fb1-must-gather-output" (OuterVolumeSpecName: "must-gather-output") pod "cf33381f-80dd-4c3b-988f-982a1b420fb1" (UID: "cf33381f-80dd-4c3b-988f-982a1b420fb1"). InnerVolumeSpecName "must-gather-output". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 22 11:40:44 crc kubenswrapper[4926]: I1122 11:40:44.740066 4926 reconciler_common.go:293] "Volume detached for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/cf33381f-80dd-4c3b-988f-982a1b420fb1-must-gather-output\") on node \"crc\" DevicePath \"\""
Nov 22 11:40:45 crc kubenswrapper[4926]: I1122 11:40:45.091316 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-zcnnb_must-gather-qm8bc_cf33381f-80dd-4c3b-988f-982a1b420fb1/copy/0.log"
Nov 22 11:40:45 crc kubenswrapper[4926]: I1122 11:40:45.092055 4926 generic.go:334] "Generic (PLEG): container finished" podID="cf33381f-80dd-4c3b-988f-982a1b420fb1" containerID="35c58ac64c17bbc1c4353719738d0a05f4f00adeacd99faa9552ca67a72ed097" exitCode=143
Nov 22 11:40:45 crc kubenswrapper[4926]: I1122 11:40:45.092110 4926 scope.go:117] "RemoveContainer" containerID="35c58ac64c17bbc1c4353719738d0a05f4f00adeacd99faa9552ca67a72ed097"
Nov 22 11:40:45 crc kubenswrapper[4926]: I1122 11:40:45.092119 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-zcnnb/must-gather-qm8bc"
Nov 22 11:40:45 crc kubenswrapper[4926]: I1122 11:40:45.115834 4926 scope.go:117] "RemoveContainer" containerID="2a8fe4ed84722d5ad60ff453a695f2b44f4f924ebd2473a49dee989aad9ab1f7"
Nov 22 11:40:45 crc kubenswrapper[4926]: I1122 11:40:45.188048 4926 scope.go:117] "RemoveContainer" containerID="35c58ac64c17bbc1c4353719738d0a05f4f00adeacd99faa9552ca67a72ed097"
Nov 22 11:40:45 crc kubenswrapper[4926]: E1122 11:40:45.188528 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"35c58ac64c17bbc1c4353719738d0a05f4f00adeacd99faa9552ca67a72ed097\": container with ID starting with 35c58ac64c17bbc1c4353719738d0a05f4f00adeacd99faa9552ca67a72ed097 not found: ID does not exist" containerID="35c58ac64c17bbc1c4353719738d0a05f4f00adeacd99faa9552ca67a72ed097"
Nov 22 11:40:45 crc kubenswrapper[4926]: I1122 11:40:45.188569 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"35c58ac64c17bbc1c4353719738d0a05f4f00adeacd99faa9552ca67a72ed097"} err="failed to get container status \"35c58ac64c17bbc1c4353719738d0a05f4f00adeacd99faa9552ca67a72ed097\": rpc error: code = NotFound desc = could not find container \"35c58ac64c17bbc1c4353719738d0a05f4f00adeacd99faa9552ca67a72ed097\": container with ID starting with 35c58ac64c17bbc1c4353719738d0a05f4f00adeacd99faa9552ca67a72ed097 not found: ID does not exist"
Nov 22 11:40:45 crc kubenswrapper[4926]: I1122 11:40:45.188596 4926 scope.go:117] "RemoveContainer" containerID="2a8fe4ed84722d5ad60ff453a695f2b44f4f924ebd2473a49dee989aad9ab1f7"
Nov 22 11:40:45 crc kubenswrapper[4926]: E1122 11:40:45.189059 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2a8fe4ed84722d5ad60ff453a695f2b44f4f924ebd2473a49dee989aad9ab1f7\": container with ID starting with 2a8fe4ed84722d5ad60ff453a695f2b44f4f924ebd2473a49dee989aad9ab1f7 not found: ID does not exist" containerID="2a8fe4ed84722d5ad60ff453a695f2b44f4f924ebd2473a49dee989aad9ab1f7"
Nov 22 11:40:45 crc kubenswrapper[4926]: I1122 11:40:45.189090 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2a8fe4ed84722d5ad60ff453a695f2b44f4f924ebd2473a49dee989aad9ab1f7"} err="failed to get container status \"2a8fe4ed84722d5ad60ff453a695f2b44f4f924ebd2473a49dee989aad9ab1f7\": rpc error: code = NotFound desc = could not find container \"2a8fe4ed84722d5ad60ff453a695f2b44f4f924ebd2473a49dee989aad9ab1f7\": container with ID starting with 2a8fe4ed84722d5ad60ff453a695f2b44f4f924ebd2473a49dee989aad9ab1f7 not found: ID does not exist"
Nov 22 11:40:46 crc kubenswrapper[4926]: I1122 11:40:46.600812 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cf33381f-80dd-4c3b-988f-982a1b420fb1" path="/var/lib/kubelet/pods/cf33381f-80dd-4c3b-988f-982a1b420fb1/volumes"
Nov 22 11:41:09 crc kubenswrapper[4926]: I1122 11:41:09.661496 4926 patch_prober.go:28] interesting pod/machine-config-daemon-xr9nd container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 22 11:41:09 crc kubenswrapper[4926]: I1122 11:41:09.662032 4926 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" podUID="d4977b14-85c3-4141-9b15-1768f09e8d27" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 22 11:41:38 crc kubenswrapper[4926]: I1122 11:41:38.198009 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-rsxwn"]
Nov 22 11:41:38 crc kubenswrapper[4926]: E1122 11:41:38.199597 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="18ac4523-f3e1-4a47-9e4b-b62e8c09b961" containerName="container-00"
Nov 22 11:41:38 crc kubenswrapper[4926]: I1122 11:41:38.199663 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="18ac4523-f3e1-4a47-9e4b-b62e8c09b961" containerName="container-00"
Nov 22 11:41:38 crc kubenswrapper[4926]: E1122 11:41:38.199755 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cf33381f-80dd-4c3b-988f-982a1b420fb1" containerName="copy"
Nov 22 11:41:38 crc kubenswrapper[4926]: I1122 11:41:38.199776 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="cf33381f-80dd-4c3b-988f-982a1b420fb1" containerName="copy"
Nov 22 11:41:38 crc kubenswrapper[4926]: E1122 11:41:38.199804 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cf33381f-80dd-4c3b-988f-982a1b420fb1" containerName="gather"
Nov 22 11:41:38 crc kubenswrapper[4926]: I1122 11:41:38.199824 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="cf33381f-80dd-4c3b-988f-982a1b420fb1" containerName="gather"
Nov 22 11:41:38 crc kubenswrapper[4926]: I1122 11:41:38.200341 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="cf33381f-80dd-4c3b-988f-982a1b420fb1" containerName="copy"
Nov 22 11:41:38 crc kubenswrapper[4926]: I1122 11:41:38.200390 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="cf33381f-80dd-4c3b-988f-982a1b420fb1" containerName="gather"
Nov 22 11:41:38 crc kubenswrapper[4926]: I1122 11:41:38.200447 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="18ac4523-f3e1-4a47-9e4b-b62e8c09b961" containerName="container-00"
Nov 22 11:41:38 crc kubenswrapper[4926]: I1122 11:41:38.203964 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-rsxwn"
Nov 22 11:41:38 crc kubenswrapper[4926]: I1122 11:41:38.220691 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-rsxwn"]
Nov 22 11:41:38 crc kubenswrapper[4926]: I1122 11:41:38.309216 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bcd410ce-a853-429d-94ee-36fad8056ba3-catalog-content\") pod \"community-operators-rsxwn\" (UID: \"bcd410ce-a853-429d-94ee-36fad8056ba3\") " pod="openshift-marketplace/community-operators-rsxwn"
Nov 22 11:41:38 crc kubenswrapper[4926]: I1122 11:41:38.309293 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bcd410ce-a853-429d-94ee-36fad8056ba3-utilities\") pod \"community-operators-rsxwn\" (UID: \"bcd410ce-a853-429d-94ee-36fad8056ba3\") " pod="openshift-marketplace/community-operators-rsxwn"
Nov 22 11:41:38 crc kubenswrapper[4926]: I1122 11:41:38.309372 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fj9nr\" (UniqueName: \"kubernetes.io/projected/bcd410ce-a853-429d-94ee-36fad8056ba3-kube-api-access-fj9nr\") pod \"community-operators-rsxwn\" (UID: \"bcd410ce-a853-429d-94ee-36fad8056ba3\") " pod="openshift-marketplace/community-operators-rsxwn"
Nov 22 11:41:38 crc kubenswrapper[4926]: I1122 11:41:38.410966 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bcd410ce-a853-429d-94ee-36fad8056ba3-catalog-content\") pod \"community-operators-rsxwn\" (UID: \"bcd410ce-a853-429d-94ee-36fad8056ba3\") " pod="openshift-marketplace/community-operators-rsxwn"
Nov 22 11:41:38 crc kubenswrapper[4926]: I1122 11:41:38.411051 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bcd410ce-a853-429d-94ee-36fad8056ba3-utilities\") pod \"community-operators-rsxwn\" (UID: \"bcd410ce-a853-429d-94ee-36fad8056ba3\") " pod="openshift-marketplace/community-operators-rsxwn"
Nov 22 11:41:38 crc kubenswrapper[4926]: I1122 11:41:38.411138 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fj9nr\" (UniqueName: \"kubernetes.io/projected/bcd410ce-a853-429d-94ee-36fad8056ba3-kube-api-access-fj9nr\") pod \"community-operators-rsxwn\" (UID: \"bcd410ce-a853-429d-94ee-36fad8056ba3\") " pod="openshift-marketplace/community-operators-rsxwn"
Nov 22 11:41:38 crc kubenswrapper[4926]: I1122 11:41:38.411495 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bcd410ce-a853-429d-94ee-36fad8056ba3-catalog-content\") pod \"community-operators-rsxwn\" (UID: \"bcd410ce-a853-429d-94ee-36fad8056ba3\") " pod="openshift-marketplace/community-operators-rsxwn"
Nov 22 11:41:38 crc kubenswrapper[4926]: I1122 11:41:38.411704 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bcd410ce-a853-429d-94ee-36fad8056ba3-utilities\") pod \"community-operators-rsxwn\" (UID: \"bcd410ce-a853-429d-94ee-36fad8056ba3\") " pod="openshift-marketplace/community-operators-rsxwn"
Nov 22 11:41:38 crc kubenswrapper[4926]: I1122 11:41:38.437488 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fj9nr\" (UniqueName: \"kubernetes.io/projected/bcd410ce-a853-429d-94ee-36fad8056ba3-kube-api-access-fj9nr\") pod \"community-operators-rsxwn\" (UID: \"bcd410ce-a853-429d-94ee-36fad8056ba3\") " pod="openshift-marketplace/community-operators-rsxwn"
Nov 22 11:41:38 crc kubenswrapper[4926]: I1122 11:41:38.542002 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-rsxwn"
Nov 22 11:41:39 crc kubenswrapper[4926]: I1122 11:41:39.095841 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-rsxwn"]
Nov 22 11:41:39 crc kubenswrapper[4926]: I1122 11:41:39.661493 4926 patch_prober.go:28] interesting pod/machine-config-daemon-xr9nd container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 22 11:41:39 crc kubenswrapper[4926]: I1122 11:41:39.661613 4926 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" podUID="d4977b14-85c3-4141-9b15-1768f09e8d27" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 22 11:41:39 crc kubenswrapper[4926]: I1122 11:41:39.690064 4926 generic.go:334] "Generic (PLEG): container finished" podID="bcd410ce-a853-429d-94ee-36fad8056ba3" containerID="98f179d1785231b3fb2db4f06fc8aa8f17eab6bde0308a7f7c646cd695520b4c" exitCode=0
Nov 22 11:41:39 crc kubenswrapper[4926]: I1122 11:41:39.690135 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-rsxwn" event={"ID":"bcd410ce-a853-429d-94ee-36fad8056ba3","Type":"ContainerDied","Data":"98f179d1785231b3fb2db4f06fc8aa8f17eab6bde0308a7f7c646cd695520b4c"}
Nov 22 11:41:39 crc kubenswrapper[4926]: I1122 11:41:39.690176 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-rsxwn" event={"ID":"bcd410ce-a853-429d-94ee-36fad8056ba3","Type":"ContainerStarted","Data":"03aa3b64468f711234eec2c4636625b5dcc2d99d7b09ea895f616faff40c7323"}
Nov 22 11:41:39 crc kubenswrapper[4926]: I1122 11:41:39.697274 4926 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider
Nov 22 11:41:40 crc kubenswrapper[4926]: I1122 11:41:40.704018 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-rsxwn" event={"ID":"bcd410ce-a853-429d-94ee-36fad8056ba3","Type":"ContainerStarted","Data":"7550f0465db62e0ec493f087f981fc7fc9955fb11d9d593ebeb2092b665044ea"}
Nov 22 11:41:41 crc kubenswrapper[4926]: I1122 11:41:41.715512 4926 generic.go:334] "Generic (PLEG): container finished" podID="bcd410ce-a853-429d-94ee-36fad8056ba3" containerID="7550f0465db62e0ec493f087f981fc7fc9955fb11d9d593ebeb2092b665044ea" exitCode=0
Nov 22 11:41:41 crc kubenswrapper[4926]: I1122 11:41:41.715586 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-rsxwn" event={"ID":"bcd410ce-a853-429d-94ee-36fad8056ba3","Type":"ContainerDied","Data":"7550f0465db62e0ec493f087f981fc7fc9955fb11d9d593ebeb2092b665044ea"}
Nov 22 11:41:42 crc kubenswrapper[4926]: I1122 11:41:42.727981 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-rsxwn" event={"ID":"bcd410ce-a853-429d-94ee-36fad8056ba3","Type":"ContainerStarted","Data":"2092127b94ee69ec487c2913b211a3e5dfda70d149d316b06cf15cf7b14f5818"}
Nov 22 11:41:42 crc kubenswrapper[4926]: I1122 11:41:42.751296 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-rsxwn" podStartSLOduration=2.316420014 podStartE2EDuration="4.751277217s" podCreationTimestamp="2025-11-22 11:41:38 +0000 UTC" firstStartedPulling="2025-11-22 11:41:39.694664569 +0000 UTC m=+3719.996269896" lastFinishedPulling="2025-11-22 11:41:42.129521802 +0000 UTC m=+3722.431127099" observedRunningTime="2025-11-22 11:41:42.744141482 +0000 UTC m=+3723.045746789" watchObservedRunningTime="2025-11-22 11:41:42.751277217 +0000 UTC m=+3723.052882504"
Nov 22 11:41:48 crc kubenswrapper[4926]: I1122 11:41:48.542269 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-rsxwn"
Nov 22 11:41:48 crc kubenswrapper[4926]: I1122 11:41:48.542725 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-rsxwn"
Nov 22 11:41:48 crc kubenswrapper[4926]: I1122 11:41:48.610555 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-rsxwn"
Nov 22 11:41:48 crc kubenswrapper[4926]: I1122 11:41:48.850579 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-rsxwn"
Nov 22 11:41:48 crc kubenswrapper[4926]: I1122 11:41:48.896462 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-rsxwn"]
Nov 22 11:41:50 crc kubenswrapper[4926]: I1122 11:41:50.811747 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-rsxwn" podUID="bcd410ce-a853-429d-94ee-36fad8056ba3" containerName="registry-server" containerID="cri-o://2092127b94ee69ec487c2913b211a3e5dfda70d149d316b06cf15cf7b14f5818" gracePeriod=2
Nov 22 11:41:51 crc kubenswrapper[4926]: I1122 11:41:51.794020 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-rsxwn"
Nov 22 11:41:51 crc kubenswrapper[4926]: I1122 11:41:51.796760 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bcd410ce-a853-429d-94ee-36fad8056ba3-utilities\") pod \"bcd410ce-a853-429d-94ee-36fad8056ba3\" (UID: \"bcd410ce-a853-429d-94ee-36fad8056ba3\") "
Nov 22 11:41:51 crc kubenswrapper[4926]: I1122 11:41:51.796822 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fj9nr\" (UniqueName: \"kubernetes.io/projected/bcd410ce-a853-429d-94ee-36fad8056ba3-kube-api-access-fj9nr\") pod \"bcd410ce-a853-429d-94ee-36fad8056ba3\" (UID: \"bcd410ce-a853-429d-94ee-36fad8056ba3\") "
Nov 22 11:41:51 crc kubenswrapper[4926]: I1122 11:41:51.796961 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bcd410ce-a853-429d-94ee-36fad8056ba3-catalog-content\") pod \"bcd410ce-a853-429d-94ee-36fad8056ba3\" (UID: \"bcd410ce-a853-429d-94ee-36fad8056ba3\") "
Nov 22 11:41:51 crc kubenswrapper[4926]: I1122 11:41:51.798938 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bcd410ce-a853-429d-94ee-36fad8056ba3-utilities" (OuterVolumeSpecName: "utilities") pod "bcd410ce-a853-429d-94ee-36fad8056ba3" (UID: "bcd410ce-a853-429d-94ee-36fad8056ba3"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 22 11:41:51 crc kubenswrapper[4926]: I1122 11:41:51.802494 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bcd410ce-a853-429d-94ee-36fad8056ba3-kube-api-access-fj9nr" (OuterVolumeSpecName: "kube-api-access-fj9nr") pod "bcd410ce-a853-429d-94ee-36fad8056ba3" (UID: "bcd410ce-a853-429d-94ee-36fad8056ba3"). InnerVolumeSpecName "kube-api-access-fj9nr". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 22 11:41:51 crc kubenswrapper[4926]: I1122 11:41:51.829560 4926 generic.go:334] "Generic (PLEG): container finished" podID="bcd410ce-a853-429d-94ee-36fad8056ba3" containerID="2092127b94ee69ec487c2913b211a3e5dfda70d149d316b06cf15cf7b14f5818" exitCode=0
Nov 22 11:41:51 crc kubenswrapper[4926]: I1122 11:41:51.829611 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-rsxwn" event={"ID":"bcd410ce-a853-429d-94ee-36fad8056ba3","Type":"ContainerDied","Data":"2092127b94ee69ec487c2913b211a3e5dfda70d149d316b06cf15cf7b14f5818"}
Nov 22 11:41:51 crc kubenswrapper[4926]: I1122 11:41:51.829651 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-rsxwn" event={"ID":"bcd410ce-a853-429d-94ee-36fad8056ba3","Type":"ContainerDied","Data":"03aa3b64468f711234eec2c4636625b5dcc2d99d7b09ea895f616faff40c7323"}
Nov 22 11:41:51 crc kubenswrapper[4926]: I1122 11:41:51.829673 4926 scope.go:117] "RemoveContainer" containerID="2092127b94ee69ec487c2913b211a3e5dfda70d149d316b06cf15cf7b14f5818"
Nov 22 11:41:51 crc kubenswrapper[4926]: I1122 11:41:51.832159 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-rsxwn"
Nov 22 11:41:51 crc kubenswrapper[4926]: I1122 11:41:51.862110 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bcd410ce-a853-429d-94ee-36fad8056ba3-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "bcd410ce-a853-429d-94ee-36fad8056ba3" (UID: "bcd410ce-a853-429d-94ee-36fad8056ba3"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 22 11:41:51 crc kubenswrapper[4926]: I1122 11:41:51.889150 4926 scope.go:117] "RemoveContainer" containerID="7550f0465db62e0ec493f087f981fc7fc9955fb11d9d593ebeb2092b665044ea"
Nov 22 11:41:51 crc kubenswrapper[4926]: I1122 11:41:51.899110 4926 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bcd410ce-a853-429d-94ee-36fad8056ba3-utilities\") on node \"crc\" DevicePath \"\""
Nov 22 11:41:51 crc kubenswrapper[4926]: I1122 11:41:51.899149 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fj9nr\" (UniqueName: \"kubernetes.io/projected/bcd410ce-a853-429d-94ee-36fad8056ba3-kube-api-access-fj9nr\") on node \"crc\" DevicePath \"\""
Nov 22 11:41:51 crc kubenswrapper[4926]: I1122 11:41:51.899165 4926 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bcd410ce-a853-429d-94ee-36fad8056ba3-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 22 11:41:51 crc kubenswrapper[4926]: I1122 11:41:51.920298 4926 scope.go:117] "RemoveContainer" containerID="98f179d1785231b3fb2db4f06fc8aa8f17eab6bde0308a7f7c646cd695520b4c"
Nov 22 11:41:51 crc kubenswrapper[4926]: I1122 11:41:51.962343 4926 scope.go:117] "RemoveContainer" containerID="2092127b94ee69ec487c2913b211a3e5dfda70d149d316b06cf15cf7b14f5818"
Nov 22 11:41:51 crc kubenswrapper[4926]: E1122 11:41:51.962725 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2092127b94ee69ec487c2913b211a3e5dfda70d149d316b06cf15cf7b14f5818\": container with ID starting with 2092127b94ee69ec487c2913b211a3e5dfda70d149d316b06cf15cf7b14f5818 not found: ID does not exist" containerID="2092127b94ee69ec487c2913b211a3e5dfda70d149d316b06cf15cf7b14f5818"
Nov 22 11:41:51 crc kubenswrapper[4926]: I1122 11:41:51.962796 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2092127b94ee69ec487c2913b211a3e5dfda70d149d316b06cf15cf7b14f5818"} err="failed to get container status \"2092127b94ee69ec487c2913b211a3e5dfda70d149d316b06cf15cf7b14f5818\": rpc error: code = NotFound desc = could not find container \"2092127b94ee69ec487c2913b211a3e5dfda70d149d316b06cf15cf7b14f5818\": container with ID starting with 2092127b94ee69ec487c2913b211a3e5dfda70d149d316b06cf15cf7b14f5818 not found: ID does not exist"
Nov 22 11:41:51 crc kubenswrapper[4926]: I1122 11:41:51.962824 4926 scope.go:117] "RemoveContainer" containerID="7550f0465db62e0ec493f087f981fc7fc9955fb11d9d593ebeb2092b665044ea"
Nov 22 11:41:51 crc kubenswrapper[4926]: E1122 11:41:51.963223 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7550f0465db62e0ec493f087f981fc7fc9955fb11d9d593ebeb2092b665044ea\": container with ID starting with 7550f0465db62e0ec493f087f981fc7fc9955fb11d9d593ebeb2092b665044ea not found: ID does not exist" containerID="7550f0465db62e0ec493f087f981fc7fc9955fb11d9d593ebeb2092b665044ea"
Nov 22 11:41:51 crc kubenswrapper[4926]: I1122 11:41:51.963247 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7550f0465db62e0ec493f087f981fc7fc9955fb11d9d593ebeb2092b665044ea"} err="failed to get container status \"7550f0465db62e0ec493f087f981fc7fc9955fb11d9d593ebeb2092b665044ea\": rpc error: code = NotFound desc = could not find container \"7550f0465db62e0ec493f087f981fc7fc9955fb11d9d593ebeb2092b665044ea\": container with ID starting with 7550f0465db62e0ec493f087f981fc7fc9955fb11d9d593ebeb2092b665044ea not found: ID does not exist"
Nov 22 11:41:51 crc kubenswrapper[4926]: I1122 11:41:51.963261 4926 scope.go:117] "RemoveContainer" containerID="98f179d1785231b3fb2db4f06fc8aa8f17eab6bde0308a7f7c646cd695520b4c"
Nov 22 11:41:51 crc kubenswrapper[4926]: E1122 11:41:51.963530 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"98f179d1785231b3fb2db4f06fc8aa8f17eab6bde0308a7f7c646cd695520b4c\": container with ID starting with 98f179d1785231b3fb2db4f06fc8aa8f17eab6bde0308a7f7c646cd695520b4c not found: ID does not exist" containerID="98f179d1785231b3fb2db4f06fc8aa8f17eab6bde0308a7f7c646cd695520b4c"
Nov 22 11:41:51 crc kubenswrapper[4926]: I1122 11:41:51.963657 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"98f179d1785231b3fb2db4f06fc8aa8f17eab6bde0308a7f7c646cd695520b4c"} err="failed to get container status \"98f179d1785231b3fb2db4f06fc8aa8f17eab6bde0308a7f7c646cd695520b4c\": rpc error: code = NotFound desc = could not find container \"98f179d1785231b3fb2db4f06fc8aa8f17eab6bde0308a7f7c646cd695520b4c\": container with ID starting with 98f179d1785231b3fb2db4f06fc8aa8f17eab6bde0308a7f7c646cd695520b4c not found: ID does not exist"
Nov 22 11:41:52 crc kubenswrapper[4926]: I1122 11:41:52.203372 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-rsxwn"]
Nov 22 11:41:52 crc kubenswrapper[4926]: I1122 11:41:52.211623 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-rsxwn"]
Nov 22 11:41:52 crc kubenswrapper[4926]: I1122 11:41:52.596009 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bcd410ce-a853-429d-94ee-36fad8056ba3" path="/var/lib/kubelet/pods/bcd410ce-a853-429d-94ee-36fad8056ba3/volumes"
Nov 22 11:42:03 crc kubenswrapper[4926]: I1122 11:42:03.106344 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-h5564"]
Nov 22 11:42:03 crc kubenswrapper[4926]: E1122 11:42:03.107653 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bcd410ce-a853-429d-94ee-36fad8056ba3" containerName="extract-content"
Nov 22 11:42:03 crc kubenswrapper[4926]: I1122 11:42:03.107712 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="bcd410ce-a853-429d-94ee-36fad8056ba3" containerName="extract-content"
Nov 22 11:42:03 crc kubenswrapper[4926]: E1122 11:42:03.107772 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bcd410ce-a853-429d-94ee-36fad8056ba3" containerName="extract-utilities"
Nov 22 11:42:03 crc kubenswrapper[4926]: I1122 11:42:03.107785 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="bcd410ce-a853-429d-94ee-36fad8056ba3" containerName="extract-utilities"
Nov 22 11:42:03 crc kubenswrapper[4926]: E1122 11:42:03.107829 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bcd410ce-a853-429d-94ee-36fad8056ba3" containerName="registry-server"
Nov 22 11:42:03 crc kubenswrapper[4926]: I1122 11:42:03.107843 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="bcd410ce-a853-429d-94ee-36fad8056ba3" containerName="registry-server"
Nov 22 11:42:03 crc kubenswrapper[4926]: I1122 11:42:03.108219 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="bcd410ce-a853-429d-94ee-36fad8056ba3" containerName="registry-server"
Nov 22 11:42:03 crc kubenswrapper[4926]: I1122 11:42:03.110737 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-h5564"
Nov 22 11:42:03 crc kubenswrapper[4926]: I1122 11:42:03.121211 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-h5564"]
Nov 22 11:42:03 crc kubenswrapper[4926]: I1122 11:42:03.237992 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4336a042-f156-4c24-8981-63101df996f9-catalog-content\") pod \"redhat-operators-h5564\" (UID: \"4336a042-f156-4c24-8981-63101df996f9\") " pod="openshift-marketplace/redhat-operators-h5564"
Nov 22 11:42:03 crc kubenswrapper[4926]: I1122 11:42:03.238061 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-n5x4n\" (UniqueName: \"kubernetes.io/projected/4336a042-f156-4c24-8981-63101df996f9-kube-api-access-n5x4n\") pod \"redhat-operators-h5564\" (UID: \"4336a042-f156-4c24-8981-63101df996f9\") " pod="openshift-marketplace/redhat-operators-h5564"
Nov 22 11:42:03 crc kubenswrapper[4926]: I1122 11:42:03.238093 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4336a042-f156-4c24-8981-63101df996f9-utilities\") pod \"redhat-operators-h5564\" (UID: \"4336a042-f156-4c24-8981-63101df996f9\") " pod="openshift-marketplace/redhat-operators-h5564"
Nov 22 11:42:03 crc kubenswrapper[4926]: I1122 11:42:03.340206 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4336a042-f156-4c24-8981-63101df996f9-catalog-content\") pod \"redhat-operators-h5564\" (UID: \"4336a042-f156-4c24-8981-63101df996f9\") " pod="openshift-marketplace/redhat-operators-h5564"
Nov 22 11:42:03 crc kubenswrapper[4926]: I1122 11:42:03.340589 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-n5x4n\" (UniqueName: \"kubernetes.io/projected/4336a042-f156-4c24-8981-63101df996f9-kube-api-access-n5x4n\") pod \"redhat-operators-h5564\" (UID: \"4336a042-f156-4c24-8981-63101df996f9\") " pod="openshift-marketplace/redhat-operators-h5564"
Nov 22 11:42:03 crc kubenswrapper[4926]: I1122 11:42:03.340771 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4336a042-f156-4c24-8981-63101df996f9-utilities\") pod \"redhat-operators-h5564\" (UID: \"4336a042-f156-4c24-8981-63101df996f9\") " pod="openshift-marketplace/redhat-operators-h5564"
Nov 22 11:42:03 crc kubenswrapper[4926]: I1122 11:42:03.340670 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4336a042-f156-4c24-8981-63101df996f9-catalog-content\") pod \"redhat-operators-h5564\" (UID: \"4336a042-f156-4c24-8981-63101df996f9\") " pod="openshift-marketplace/redhat-operators-h5564"
Nov 22 11:42:03 crc kubenswrapper[4926]: I1122 11:42:03.341294 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4336a042-f156-4c24-8981-63101df996f9-utilities\") pod \"redhat-operators-h5564\" (UID: \"4336a042-f156-4c24-8981-63101df996f9\") " pod="openshift-marketplace/redhat-operators-h5564"
Nov 22 11:42:03 crc kubenswrapper[4926]: I1122 11:42:03.363696 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-n5x4n\" (UniqueName: \"kubernetes.io/projected/4336a042-f156-4c24-8981-63101df996f9-kube-api-access-n5x4n\") pod \"redhat-operators-h5564\" (UID: \"4336a042-f156-4c24-8981-63101df996f9\") " pod="openshift-marketplace/redhat-operators-h5564"
Nov 22 11:42:03 crc kubenswrapper[4926]: I1122 11:42:03.452036 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-h5564"
Nov 22 11:42:03 crc kubenswrapper[4926]: I1122 11:42:03.929351 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-h5564"]
Nov 22 11:42:03 crc kubenswrapper[4926]: I1122 11:42:03.981202 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-h5564" event={"ID":"4336a042-f156-4c24-8981-63101df996f9","Type":"ContainerStarted","Data":"b1174ccbcd4c45bede47a0e6e8035777f33c760d0cd9ada1beb02c0da53a8748"}
Nov 22 11:42:04 crc kubenswrapper[4926]: I1122 11:42:04.995398 4926 generic.go:334] "Generic (PLEG): container finished" podID="4336a042-f156-4c24-8981-63101df996f9" containerID="fddf569212976fc20902ed2d6089a7ae49ce57f6b0a5ee7d593b0311170448d3" exitCode=0
Nov 22 11:42:04 crc kubenswrapper[4926]: I1122 11:42:04.995515 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-h5564" event={"ID":"4336a042-f156-4c24-8981-63101df996f9","Type":"ContainerDied","Data":"fddf569212976fc20902ed2d6089a7ae49ce57f6b0a5ee7d593b0311170448d3"}
Nov 22 11:42:07 crc kubenswrapper[4926]: I1122 11:42:07.032849 4926 generic.go:334] "Generic (PLEG): container finished" podID="4336a042-f156-4c24-8981-63101df996f9" containerID="bc789d1e3cb9e88cc6922c86b1522f8c68b064827046a380e584b6923108bea1" exitCode=0
Nov 22 11:42:07 crc kubenswrapper[4926]: I1122 11:42:07.032918 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-h5564" event={"ID":"4336a042-f156-4c24-8981-63101df996f9","Type":"ContainerDied","Data":"bc789d1e3cb9e88cc6922c86b1522f8c68b064827046a380e584b6923108bea1"}
Nov 22 11:42:08 crc kubenswrapper[4926]: I1122 11:42:08.047205 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-h5564" event={"ID":"4336a042-f156-4c24-8981-63101df996f9","Type":"ContainerStarted","Data":"d74cfd6c013668b8b5dae35b5e8e7c1b4a150dae1f4cb37d786168398dcef445"}
Nov 22 11:42:08 crc kubenswrapper[4926]: I1122 11:42:08.070474 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-h5564" podStartSLOduration=2.52358503 podStartE2EDuration="5.070436171s" podCreationTimestamp="2025-11-22 11:42:03 +0000 UTC" firstStartedPulling="2025-11-22 11:42:04.998538953 +0000 UTC m=+3745.300144250" lastFinishedPulling="2025-11-22 11:42:07.545390094 +0000 UTC m=+3747.846995391" observedRunningTime="2025-11-22
11:42:08.066147788 +0000 UTC m=+3748.367753115" watchObservedRunningTime="2025-11-22 11:42:08.070436171 +0000 UTC m=+3748.372041488" Nov 22 11:42:09 crc kubenswrapper[4926]: I1122 11:42:09.663430 4926 patch_prober.go:28] interesting pod/machine-config-daemon-xr9nd container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 22 11:42:09 crc kubenswrapper[4926]: I1122 11:42:09.663802 4926 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" podUID="d4977b14-85c3-4141-9b15-1768f09e8d27" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 22 11:42:09 crc kubenswrapper[4926]: I1122 11:42:09.663847 4926 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" Nov 22 11:42:09 crc kubenswrapper[4926]: I1122 11:42:09.664637 4926 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"23214392718d6cb9dbc33ab7d4275deb5093396ce41b71fae35e5d84eda8c403"} pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 22 11:42:09 crc kubenswrapper[4926]: I1122 11:42:09.664682 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" podUID="d4977b14-85c3-4141-9b15-1768f09e8d27" containerName="machine-config-daemon" containerID="cri-o://23214392718d6cb9dbc33ab7d4275deb5093396ce41b71fae35e5d84eda8c403" gracePeriod=600 Nov 22 11:42:09 crc kubenswrapper[4926]: E1122 11:42:09.792048 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xr9nd_openshift-machine-config-operator(d4977b14-85c3-4141-9b15-1768f09e8d27)\"" pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" podUID="d4977b14-85c3-4141-9b15-1768f09e8d27" Nov 22 11:42:10 crc kubenswrapper[4926]: I1122 11:42:10.070729 4926 generic.go:334] "Generic (PLEG): container finished" podID="d4977b14-85c3-4141-9b15-1768f09e8d27" containerID="23214392718d6cb9dbc33ab7d4275deb5093396ce41b71fae35e5d84eda8c403" exitCode=0 Nov 22 11:42:10 crc kubenswrapper[4926]: I1122 11:42:10.070771 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" event={"ID":"d4977b14-85c3-4141-9b15-1768f09e8d27","Type":"ContainerDied","Data":"23214392718d6cb9dbc33ab7d4275deb5093396ce41b71fae35e5d84eda8c403"} Nov 22 11:42:10 crc kubenswrapper[4926]: I1122 11:42:10.070836 4926 scope.go:117] "RemoveContainer" containerID="9d6e4aef945d9e08737522f4731c0e4f60636bb537365c94ec1ad314c9a6e50e" Nov 22 11:42:10 crc kubenswrapper[4926]: I1122 11:42:10.071753 4926 scope.go:117] "RemoveContainer" containerID="23214392718d6cb9dbc33ab7d4275deb5093396ce41b71fae35e5d84eda8c403" Nov 22 11:42:10 crc kubenswrapper[4926]: E1122 11:42:10.072226 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with 
CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xr9nd_openshift-machine-config-operator(d4977b14-85c3-4141-9b15-1768f09e8d27)\"" pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" podUID="d4977b14-85c3-4141-9b15-1768f09e8d27" Nov 22 11:42:13 crc kubenswrapper[4926]: I1122 11:42:13.452628 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-h5564" Nov 22 11:42:13 crc kubenswrapper[4926]: I1122 11:42:13.453301 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-h5564" Nov 22 11:42:14 crc kubenswrapper[4926]: I1122 11:42:14.498019 4926 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-h5564" podUID="4336a042-f156-4c24-8981-63101df996f9" containerName="registry-server" probeResult="failure" output=< Nov 22 11:42:14 crc kubenswrapper[4926]: timeout: failed to connect service ":50051" within 1s Nov 22 11:42:14 crc kubenswrapper[4926]: > Nov 22 11:42:23 crc kubenswrapper[4926]: I1122 11:42:23.517699 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-h5564" Nov 22 11:42:23 crc kubenswrapper[4926]: I1122 11:42:23.576094 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-h5564" Nov 22 11:42:23 crc kubenswrapper[4926]: I1122 11:42:23.762220 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-h5564"] Nov 22 11:42:24 crc kubenswrapper[4926]: I1122 11:42:24.582522 4926 scope.go:117] "RemoveContainer" containerID="23214392718d6cb9dbc33ab7d4275deb5093396ce41b71fae35e5d84eda8c403" Nov 22 11:42:24 crc kubenswrapper[4926]: E1122 11:42:24.584331 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xr9nd_openshift-machine-config-operator(d4977b14-85c3-4141-9b15-1768f09e8d27)\"" pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" podUID="d4977b14-85c3-4141-9b15-1768f09e8d27" Nov 22 11:42:25 crc kubenswrapper[4926]: I1122 11:42:25.204985 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-h5564" podUID="4336a042-f156-4c24-8981-63101df996f9" containerName="registry-server" containerID="cri-o://d74cfd6c013668b8b5dae35b5e8e7c1b4a150dae1f4cb37d786168398dcef445" gracePeriod=2 Nov 22 11:42:25 crc kubenswrapper[4926]: I1122 11:42:25.983158 4926 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-h5564" Nov 22 11:42:26 crc kubenswrapper[4926]: I1122 11:42:26.045690 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4336a042-f156-4c24-8981-63101df996f9-catalog-content\") pod \"4336a042-f156-4c24-8981-63101df996f9\" (UID: \"4336a042-f156-4c24-8981-63101df996f9\") " Nov 22 11:42:26 crc kubenswrapper[4926]: I1122 11:42:26.045777 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4336a042-f156-4c24-8981-63101df996f9-utilities\") pod \"4336a042-f156-4c24-8981-63101df996f9\" (UID: \"4336a042-f156-4c24-8981-63101df996f9\") " Nov 22 11:42:26 crc kubenswrapper[4926]: I1122 11:42:26.045955 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-n5x4n\" (UniqueName: \"kubernetes.io/projected/4336a042-f156-4c24-8981-63101df996f9-kube-api-access-n5x4n\") pod \"4336a042-f156-4c24-8981-63101df996f9\" (UID: \"4336a042-f156-4c24-8981-63101df996f9\") " Nov 22 11:42:26 crc kubenswrapper[4926]: I1122 11:42:26.046543 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4336a042-f156-4c24-8981-63101df996f9-utilities" (OuterVolumeSpecName: "utilities") pod "4336a042-f156-4c24-8981-63101df996f9" (UID: "4336a042-f156-4c24-8981-63101df996f9"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 11:42:26 crc kubenswrapper[4926]: I1122 11:42:26.047014 4926 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4336a042-f156-4c24-8981-63101df996f9-utilities\") on node \"crc\" DevicePath \"\"" Nov 22 11:42:26 crc kubenswrapper[4926]: I1122 11:42:26.056205 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4336a042-f156-4c24-8981-63101df996f9-kube-api-access-n5x4n" (OuterVolumeSpecName: "kube-api-access-n5x4n") pod "4336a042-f156-4c24-8981-63101df996f9" (UID: "4336a042-f156-4c24-8981-63101df996f9"). InnerVolumeSpecName "kube-api-access-n5x4n". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 11:42:26 crc kubenswrapper[4926]: I1122 11:42:26.148278 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4336a042-f156-4c24-8981-63101df996f9-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "4336a042-f156-4c24-8981-63101df996f9" (UID: "4336a042-f156-4c24-8981-63101df996f9"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 11:42:26 crc kubenswrapper[4926]: I1122 11:42:26.148834 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4336a042-f156-4c24-8981-63101df996f9-catalog-content\") pod \"4336a042-f156-4c24-8981-63101df996f9\" (UID: \"4336a042-f156-4c24-8981-63101df996f9\") " Nov 22 11:42:26 crc kubenswrapper[4926]: W1122 11:42:26.149031 4926 empty_dir.go:500] Warning: Unmount skipped because path does not exist: /var/lib/kubelet/pods/4336a042-f156-4c24-8981-63101df996f9/volumes/kubernetes.io~empty-dir/catalog-content Nov 22 11:42:26 crc kubenswrapper[4926]: I1122 11:42:26.149098 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4336a042-f156-4c24-8981-63101df996f9-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "4336a042-f156-4c24-8981-63101df996f9" (UID: "4336a042-f156-4c24-8981-63101df996f9"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 11:42:26 crc kubenswrapper[4926]: I1122 11:42:26.149757 4926 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4336a042-f156-4c24-8981-63101df996f9-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 22 11:42:26 crc kubenswrapper[4926]: I1122 11:42:26.149786 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-n5x4n\" (UniqueName: \"kubernetes.io/projected/4336a042-f156-4c24-8981-63101df996f9-kube-api-access-n5x4n\") on node \"crc\" DevicePath \"\"" Nov 22 11:42:26 crc kubenswrapper[4926]: I1122 11:42:26.234784 4926 generic.go:334] "Generic (PLEG): container finished" podID="4336a042-f156-4c24-8981-63101df996f9" containerID="d74cfd6c013668b8b5dae35b5e8e7c1b4a150dae1f4cb37d786168398dcef445" exitCode=0 Nov 22 11:42:26 crc kubenswrapper[4926]: I1122 11:42:26.234852 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-h5564" event={"ID":"4336a042-f156-4c24-8981-63101df996f9","Type":"ContainerDied","Data":"d74cfd6c013668b8b5dae35b5e8e7c1b4a150dae1f4cb37d786168398dcef445"} Nov 22 11:42:26 crc kubenswrapper[4926]: I1122 11:42:26.235005 4926 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-h5564" Nov 22 11:42:26 crc kubenswrapper[4926]: I1122 11:42:26.249173 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-h5564" event={"ID":"4336a042-f156-4c24-8981-63101df996f9","Type":"ContainerDied","Data":"b1174ccbcd4c45bede47a0e6e8035777f33c760d0cd9ada1beb02c0da53a8748"} Nov 22 11:42:26 crc kubenswrapper[4926]: I1122 11:42:26.249295 4926 scope.go:117] "RemoveContainer" containerID="d74cfd6c013668b8b5dae35b5e8e7c1b4a150dae1f4cb37d786168398dcef445" Nov 22 11:42:26 crc kubenswrapper[4926]: I1122 11:42:26.288382 4926 scope.go:117] "RemoveContainer" containerID="bc789d1e3cb9e88cc6922c86b1522f8c68b064827046a380e584b6923108bea1" Nov 22 11:42:26 crc kubenswrapper[4926]: I1122 11:42:26.304945 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-h5564"] Nov 22 11:42:26 crc kubenswrapper[4926]: I1122 11:42:26.321975 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-h5564"] Nov 22 11:42:26 crc kubenswrapper[4926]: I1122 11:42:26.381469 4926 scope.go:117] "RemoveContainer" containerID="fddf569212976fc20902ed2d6089a7ae49ce57f6b0a5ee7d593b0311170448d3" Nov 22 11:42:26 crc kubenswrapper[4926]: I1122 11:42:26.418247 4926 scope.go:117] "RemoveContainer" containerID="d74cfd6c013668b8b5dae35b5e8e7c1b4a150dae1f4cb37d786168398dcef445" Nov 22 11:42:26 crc kubenswrapper[4926]: E1122 11:42:26.418828 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d74cfd6c013668b8b5dae35b5e8e7c1b4a150dae1f4cb37d786168398dcef445\": container with ID starting with d74cfd6c013668b8b5dae35b5e8e7c1b4a150dae1f4cb37d786168398dcef445 not found: ID does not exist" containerID="d74cfd6c013668b8b5dae35b5e8e7c1b4a150dae1f4cb37d786168398dcef445" Nov 22 11:42:26 crc kubenswrapper[4926]: I1122 11:42:26.418879 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d74cfd6c013668b8b5dae35b5e8e7c1b4a150dae1f4cb37d786168398dcef445"} err="failed to get container status \"d74cfd6c013668b8b5dae35b5e8e7c1b4a150dae1f4cb37d786168398dcef445\": rpc error: code = NotFound desc = could not find container \"d74cfd6c013668b8b5dae35b5e8e7c1b4a150dae1f4cb37d786168398dcef445\": container with ID starting with d74cfd6c013668b8b5dae35b5e8e7c1b4a150dae1f4cb37d786168398dcef445 not found: ID does not exist" Nov 22 11:42:26 crc kubenswrapper[4926]: I1122 11:42:26.418919 4926 scope.go:117] "RemoveContainer" containerID="bc789d1e3cb9e88cc6922c86b1522f8c68b064827046a380e584b6923108bea1" Nov 22 11:42:26 crc kubenswrapper[4926]: E1122 11:42:26.419248 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"bc789d1e3cb9e88cc6922c86b1522f8c68b064827046a380e584b6923108bea1\": container with ID starting with bc789d1e3cb9e88cc6922c86b1522f8c68b064827046a380e584b6923108bea1 not found: ID does not exist" containerID="bc789d1e3cb9e88cc6922c86b1522f8c68b064827046a380e584b6923108bea1" Nov 22 11:42:26 crc kubenswrapper[4926]: I1122 11:42:26.419353 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bc789d1e3cb9e88cc6922c86b1522f8c68b064827046a380e584b6923108bea1"} err="failed to get container status \"bc789d1e3cb9e88cc6922c86b1522f8c68b064827046a380e584b6923108bea1\": rpc error: code = NotFound desc = could not find container 
\"bc789d1e3cb9e88cc6922c86b1522f8c68b064827046a380e584b6923108bea1\": container with ID starting with bc789d1e3cb9e88cc6922c86b1522f8c68b064827046a380e584b6923108bea1 not found: ID does not exist" Nov 22 11:42:26 crc kubenswrapper[4926]: I1122 11:42:26.419450 4926 scope.go:117] "RemoveContainer" containerID="fddf569212976fc20902ed2d6089a7ae49ce57f6b0a5ee7d593b0311170448d3" Nov 22 11:42:26 crc kubenswrapper[4926]: E1122 11:42:26.419748 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"fddf569212976fc20902ed2d6089a7ae49ce57f6b0a5ee7d593b0311170448d3\": container with ID starting with fddf569212976fc20902ed2d6089a7ae49ce57f6b0a5ee7d593b0311170448d3 not found: ID does not exist" containerID="fddf569212976fc20902ed2d6089a7ae49ce57f6b0a5ee7d593b0311170448d3" Nov 22 11:42:26 crc kubenswrapper[4926]: I1122 11:42:26.419773 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fddf569212976fc20902ed2d6089a7ae49ce57f6b0a5ee7d593b0311170448d3"} err="failed to get container status \"fddf569212976fc20902ed2d6089a7ae49ce57f6b0a5ee7d593b0311170448d3\": rpc error: code = NotFound desc = could not find container \"fddf569212976fc20902ed2d6089a7ae49ce57f6b0a5ee7d593b0311170448d3\": container with ID starting with fddf569212976fc20902ed2d6089a7ae49ce57f6b0a5ee7d593b0311170448d3 not found: ID does not exist" Nov 22 11:42:26 crc kubenswrapper[4926]: I1122 11:42:26.594317 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4336a042-f156-4c24-8981-63101df996f9" path="/var/lib/kubelet/pods/4336a042-f156-4c24-8981-63101df996f9/volumes" Nov 22 11:42:37 crc kubenswrapper[4926]: I1122 11:42:37.583012 4926 scope.go:117] "RemoveContainer" containerID="23214392718d6cb9dbc33ab7d4275deb5093396ce41b71fae35e5d84eda8c403" Nov 22 11:42:37 crc kubenswrapper[4926]: E1122 11:42:37.584196 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xr9nd_openshift-machine-config-operator(d4977b14-85c3-4141-9b15-1768f09e8d27)\"" pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" podUID="d4977b14-85c3-4141-9b15-1768f09e8d27" Nov 22 11:42:50 crc kubenswrapper[4926]: I1122 11:42:50.587375 4926 scope.go:117] "RemoveContainer" containerID="23214392718d6cb9dbc33ab7d4275deb5093396ce41b71fae35e5d84eda8c403" Nov 22 11:42:50 crc kubenswrapper[4926]: E1122 11:42:50.588047 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xr9nd_openshift-machine-config-operator(d4977b14-85c3-4141-9b15-1768f09e8d27)\"" pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" podUID="d4977b14-85c3-4141-9b15-1768f09e8d27" Nov 22 11:43:02 crc kubenswrapper[4926]: I1122 11:43:02.582779 4926 scope.go:117] "RemoveContainer" containerID="23214392718d6cb9dbc33ab7d4275deb5093396ce41b71fae35e5d84eda8c403" Nov 22 11:43:02 crc kubenswrapper[4926]: E1122 11:43:02.583733 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-xr9nd_openshift-machine-config-operator(d4977b14-85c3-4141-9b15-1768f09e8d27)\"" pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" podUID="d4977b14-85c3-4141-9b15-1768f09e8d27" Nov 22 11:43:13 crc kubenswrapper[4926]: I1122 11:43:13.582038 4926 scope.go:117] "RemoveContainer" containerID="23214392718d6cb9dbc33ab7d4275deb5093396ce41b71fae35e5d84eda8c403" Nov 22 11:43:13 crc kubenswrapper[4926]: E1122 11:43:13.583923 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xr9nd_openshift-machine-config-operator(d4977b14-85c3-4141-9b15-1768f09e8d27)\"" pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" podUID="d4977b14-85c3-4141-9b15-1768f09e8d27" Nov 22 11:43:19 crc kubenswrapper[4926]: I1122 11:43:19.201462 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-kmzpd/must-gather-8dmck"] Nov 22 11:43:19 crc kubenswrapper[4926]: E1122 11:43:19.212762 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4336a042-f156-4c24-8981-63101df996f9" containerName="extract-content" Nov 22 11:43:19 crc kubenswrapper[4926]: I1122 11:43:19.212879 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="4336a042-f156-4c24-8981-63101df996f9" containerName="extract-content" Nov 22 11:43:19 crc kubenswrapper[4926]: E1122 11:43:19.213020 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4336a042-f156-4c24-8981-63101df996f9" containerName="extract-utilities" Nov 22 11:43:19 crc kubenswrapper[4926]: I1122 11:43:19.213082 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="4336a042-f156-4c24-8981-63101df996f9" containerName="extract-utilities" Nov 22 11:43:19 crc kubenswrapper[4926]: E1122 11:43:19.213152 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4336a042-f156-4c24-8981-63101df996f9" containerName="registry-server" Nov 22 11:43:19 crc kubenswrapper[4926]: I1122 11:43:19.213207 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="4336a042-f156-4c24-8981-63101df996f9" containerName="registry-server" Nov 22 11:43:19 crc kubenswrapper[4926]: I1122 11:43:19.213461 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="4336a042-f156-4c24-8981-63101df996f9" containerName="registry-server" Nov 22 11:43:19 crc kubenswrapper[4926]: I1122 11:43:19.214567 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-kmzpd/must-gather-8dmck" Nov 22 11:43:19 crc kubenswrapper[4926]: I1122 11:43:19.224059 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-must-gather-kmzpd"/"kube-root-ca.crt" Nov 22 11:43:19 crc kubenswrapper[4926]: I1122 11:43:19.224457 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-must-gather-kmzpd"/"openshift-service-ca.crt" Nov 22 11:43:19 crc kubenswrapper[4926]: I1122 11:43:19.239303 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-must-gather-kmzpd/must-gather-8dmck"] Nov 22 11:43:19 crc kubenswrapper[4926]: I1122 11:43:19.362156 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m8gbq\" (UniqueName: \"kubernetes.io/projected/ffeb1cff-f4a8-416f-a082-a07741f82636-kube-api-access-m8gbq\") pod \"must-gather-8dmck\" (UID: \"ffeb1cff-f4a8-416f-a082-a07741f82636\") " pod="openshift-must-gather-kmzpd/must-gather-8dmck" Nov 22 11:43:19 crc kubenswrapper[4926]: I1122 11:43:19.362235 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/ffeb1cff-f4a8-416f-a082-a07741f82636-must-gather-output\") pod \"must-gather-8dmck\" (UID: \"ffeb1cff-f4a8-416f-a082-a07741f82636\") " pod="openshift-must-gather-kmzpd/must-gather-8dmck" Nov 22 11:43:19 crc kubenswrapper[4926]: I1122 11:43:19.463854 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-m8gbq\" (UniqueName: \"kubernetes.io/projected/ffeb1cff-f4a8-416f-a082-a07741f82636-kube-api-access-m8gbq\") pod \"must-gather-8dmck\" (UID: \"ffeb1cff-f4a8-416f-a082-a07741f82636\") " pod="openshift-must-gather-kmzpd/must-gather-8dmck" Nov 22 11:43:19 crc kubenswrapper[4926]: I1122 11:43:19.463920 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/ffeb1cff-f4a8-416f-a082-a07741f82636-must-gather-output\") pod \"must-gather-8dmck\" (UID: \"ffeb1cff-f4a8-416f-a082-a07741f82636\") " pod="openshift-must-gather-kmzpd/must-gather-8dmck" Nov 22 11:43:19 crc kubenswrapper[4926]: I1122 11:43:19.464380 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/ffeb1cff-f4a8-416f-a082-a07741f82636-must-gather-output\") pod \"must-gather-8dmck\" (UID: \"ffeb1cff-f4a8-416f-a082-a07741f82636\") " pod="openshift-must-gather-kmzpd/must-gather-8dmck" Nov 22 11:43:19 crc kubenswrapper[4926]: I1122 11:43:19.482827 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-m8gbq\" (UniqueName: \"kubernetes.io/projected/ffeb1cff-f4a8-416f-a082-a07741f82636-kube-api-access-m8gbq\") pod \"must-gather-8dmck\" (UID: \"ffeb1cff-f4a8-416f-a082-a07741f82636\") " pod="openshift-must-gather-kmzpd/must-gather-8dmck" Nov 22 11:43:19 crc kubenswrapper[4926]: I1122 11:43:19.554727 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-kmzpd/must-gather-8dmck" Nov 22 11:43:19 crc kubenswrapper[4926]: I1122 11:43:19.996576 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-must-gather-kmzpd/must-gather-8dmck"] Nov 22 11:43:20 crc kubenswrapper[4926]: I1122 11:43:20.918033 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-kmzpd/must-gather-8dmck" event={"ID":"ffeb1cff-f4a8-416f-a082-a07741f82636","Type":"ContainerStarted","Data":"3edf8cd06d0cfedaed79f85abef6829ceb95889e06a9d2cc08860f893f7db8b4"} Nov 22 11:43:20 crc kubenswrapper[4926]: I1122 11:43:20.918850 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-kmzpd/must-gather-8dmck" event={"ID":"ffeb1cff-f4a8-416f-a082-a07741f82636","Type":"ContainerStarted","Data":"acaa2d716453e0542967a493ff8bdaf59a8e4c6697c28cf1059bfe70c3ab5920"} Nov 22 11:43:20 crc kubenswrapper[4926]: I1122 11:43:20.918875 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-kmzpd/must-gather-8dmck" event={"ID":"ffeb1cff-f4a8-416f-a082-a07741f82636","Type":"ContainerStarted","Data":"aa2f8bc084cab92c53d56d5dd5af609a233a93420b44c67020bd84825cc1602b"} Nov 22 11:43:20 crc kubenswrapper[4926]: I1122 11:43:20.939153 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-must-gather-kmzpd/must-gather-8dmck" podStartSLOduration=1.939122706 podStartE2EDuration="1.939122706s" podCreationTimestamp="2025-11-22 11:43:19 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 11:43:20.934204024 +0000 UTC m=+3821.235809331" watchObservedRunningTime="2025-11-22 11:43:20.939122706 +0000 UTC m=+3821.240728033" Nov 22 11:43:24 crc kubenswrapper[4926]: I1122 11:43:24.245986 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-kmzpd/crc-debug-lnfgf"] Nov 22 11:43:24 crc kubenswrapper[4926]: I1122 11:43:24.249334 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-kmzpd/crc-debug-lnfgf" Nov 22 11:43:24 crc kubenswrapper[4926]: I1122 11:43:24.251356 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-must-gather-kmzpd"/"default-dockercfg-xvn2c" Nov 22 11:43:24 crc kubenswrapper[4926]: I1122 11:43:24.352451 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-r7mzg\" (UniqueName: \"kubernetes.io/projected/34cc1b7f-c8e3-4866-8493-b07cd980b8b7-kube-api-access-r7mzg\") pod \"crc-debug-lnfgf\" (UID: \"34cc1b7f-c8e3-4866-8493-b07cd980b8b7\") " pod="openshift-must-gather-kmzpd/crc-debug-lnfgf" Nov 22 11:43:24 crc kubenswrapper[4926]: I1122 11:43:24.352673 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/34cc1b7f-c8e3-4866-8493-b07cd980b8b7-host\") pod \"crc-debug-lnfgf\" (UID: \"34cc1b7f-c8e3-4866-8493-b07cd980b8b7\") " pod="openshift-must-gather-kmzpd/crc-debug-lnfgf" Nov 22 11:43:24 crc kubenswrapper[4926]: I1122 11:43:24.454827 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-r7mzg\" (UniqueName: \"kubernetes.io/projected/34cc1b7f-c8e3-4866-8493-b07cd980b8b7-kube-api-access-r7mzg\") pod \"crc-debug-lnfgf\" (UID: \"34cc1b7f-c8e3-4866-8493-b07cd980b8b7\") " pod="openshift-must-gather-kmzpd/crc-debug-lnfgf" Nov 22 11:43:24 crc kubenswrapper[4926]: I1122 11:43:24.454943 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/34cc1b7f-c8e3-4866-8493-b07cd980b8b7-host\") pod \"crc-debug-lnfgf\" (UID: \"34cc1b7f-c8e3-4866-8493-b07cd980b8b7\") " pod="openshift-must-gather-kmzpd/crc-debug-lnfgf" Nov 22 11:43:24 crc kubenswrapper[4926]: I1122 11:43:24.455019 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/34cc1b7f-c8e3-4866-8493-b07cd980b8b7-host\") pod \"crc-debug-lnfgf\" (UID: \"34cc1b7f-c8e3-4866-8493-b07cd980b8b7\") " pod="openshift-must-gather-kmzpd/crc-debug-lnfgf" Nov 22 11:43:24 crc kubenswrapper[4926]: I1122 11:43:24.477789 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-r7mzg\" (UniqueName: \"kubernetes.io/projected/34cc1b7f-c8e3-4866-8493-b07cd980b8b7-kube-api-access-r7mzg\") pod \"crc-debug-lnfgf\" (UID: \"34cc1b7f-c8e3-4866-8493-b07cd980b8b7\") " pod="openshift-must-gather-kmzpd/crc-debug-lnfgf" Nov 22 11:43:24 crc kubenswrapper[4926]: I1122 11:43:24.567968 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-kmzpd/crc-debug-lnfgf" Nov 22 11:43:24 crc kubenswrapper[4926]: I1122 11:43:24.582875 4926 scope.go:117] "RemoveContainer" containerID="23214392718d6cb9dbc33ab7d4275deb5093396ce41b71fae35e5d84eda8c403" Nov 22 11:43:24 crc kubenswrapper[4926]: E1122 11:43:24.583102 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xr9nd_openshift-machine-config-operator(d4977b14-85c3-4141-9b15-1768f09e8d27)\"" pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" podUID="d4977b14-85c3-4141-9b15-1768f09e8d27" Nov 22 11:43:24 crc kubenswrapper[4926]: W1122 11:43:24.593859 4926 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod34cc1b7f_c8e3_4866_8493_b07cd980b8b7.slice/crio-b7cff242d16775aa057f9d8e2ccc10b0bac2b2411390b70fdf8b9682875062bb WatchSource:0}: Error finding container b7cff242d16775aa057f9d8e2ccc10b0bac2b2411390b70fdf8b9682875062bb: Status 404 returned error can't find the container with id b7cff242d16775aa057f9d8e2ccc10b0bac2b2411390b70fdf8b9682875062bb Nov 22 11:43:24 crc kubenswrapper[4926]: I1122 11:43:24.952505 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-kmzpd/crc-debug-lnfgf" event={"ID":"34cc1b7f-c8e3-4866-8493-b07cd980b8b7","Type":"ContainerStarted","Data":"5a09c359a681c3866cf3c0c6f46ed722a4042f35ed78e0d4ff79de97b232170f"} Nov 22 11:43:24 crc kubenswrapper[4926]: I1122 11:43:24.952818 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-kmzpd/crc-debug-lnfgf" event={"ID":"34cc1b7f-c8e3-4866-8493-b07cd980b8b7","Type":"ContainerStarted","Data":"b7cff242d16775aa057f9d8e2ccc10b0bac2b2411390b70fdf8b9682875062bb"} Nov 22 11:43:24 crc kubenswrapper[4926]: I1122 11:43:24.965877 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-must-gather-kmzpd/crc-debug-lnfgf" podStartSLOduration=0.96586124 podStartE2EDuration="965.86124ms" podCreationTimestamp="2025-11-22 11:43:24 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 11:43:24.965329455 +0000 UTC m=+3825.266934742" watchObservedRunningTime="2025-11-22 11:43:24.96586124 +0000 UTC m=+3825.267466517" Nov 22 11:43:38 crc kubenswrapper[4926]: I1122 11:43:38.582506 4926 scope.go:117] "RemoveContainer" containerID="23214392718d6cb9dbc33ab7d4275deb5093396ce41b71fae35e5d84eda8c403" Nov 22 11:43:38 crc kubenswrapper[4926]: E1122 11:43:38.583318 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xr9nd_openshift-machine-config-operator(d4977b14-85c3-4141-9b15-1768f09e8d27)\"" pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" podUID="d4977b14-85c3-4141-9b15-1768f09e8d27" Nov 22 11:43:51 crc kubenswrapper[4926]: I1122 11:43:51.494159 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-hm6w2"] Nov 22 11:43:51 crc kubenswrapper[4926]: I1122 11:43:51.496425 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-hm6w2" Nov 22 11:43:51 crc kubenswrapper[4926]: I1122 11:43:51.518898 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-hm6w2"] Nov 22 11:43:51 crc kubenswrapper[4926]: I1122 11:43:51.599105 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4549b3af-8a8d-4faa-8aaa-3b23866fc11b-utilities\") pod \"redhat-marketplace-hm6w2\" (UID: \"4549b3af-8a8d-4faa-8aaa-3b23866fc11b\") " pod="openshift-marketplace/redhat-marketplace-hm6w2" Nov 22 11:43:51 crc kubenswrapper[4926]: I1122 11:43:51.599748 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-whzhg\" (UniqueName: \"kubernetes.io/projected/4549b3af-8a8d-4faa-8aaa-3b23866fc11b-kube-api-access-whzhg\") pod \"redhat-marketplace-hm6w2\" (UID: \"4549b3af-8a8d-4faa-8aaa-3b23866fc11b\") " pod="openshift-marketplace/redhat-marketplace-hm6w2" Nov 22 11:43:51 crc kubenswrapper[4926]: I1122 11:43:51.599992 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4549b3af-8a8d-4faa-8aaa-3b23866fc11b-catalog-content\") pod \"redhat-marketplace-hm6w2\" (UID: \"4549b3af-8a8d-4faa-8aaa-3b23866fc11b\") " pod="openshift-marketplace/redhat-marketplace-hm6w2" Nov 22 11:43:51 crc kubenswrapper[4926]: I1122 11:43:51.702528 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4549b3af-8a8d-4faa-8aaa-3b23866fc11b-catalog-content\") pod \"redhat-marketplace-hm6w2\" (UID: \"4549b3af-8a8d-4faa-8aaa-3b23866fc11b\") " pod="openshift-marketplace/redhat-marketplace-hm6w2" Nov 22 11:43:51 crc kubenswrapper[4926]: I1122 11:43:51.702620 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4549b3af-8a8d-4faa-8aaa-3b23866fc11b-utilities\") pod \"redhat-marketplace-hm6w2\" (UID: \"4549b3af-8a8d-4faa-8aaa-3b23866fc11b\") " pod="openshift-marketplace/redhat-marketplace-hm6w2" Nov 22 11:43:51 crc kubenswrapper[4926]: I1122 11:43:51.702717 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-whzhg\" (UniqueName: \"kubernetes.io/projected/4549b3af-8a8d-4faa-8aaa-3b23866fc11b-kube-api-access-whzhg\") pod \"redhat-marketplace-hm6w2\" (UID: \"4549b3af-8a8d-4faa-8aaa-3b23866fc11b\") " pod="openshift-marketplace/redhat-marketplace-hm6w2" Nov 22 11:43:51 crc kubenswrapper[4926]: I1122 11:43:51.703392 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4549b3af-8a8d-4faa-8aaa-3b23866fc11b-utilities\") pod \"redhat-marketplace-hm6w2\" (UID: \"4549b3af-8a8d-4faa-8aaa-3b23866fc11b\") " pod="openshift-marketplace/redhat-marketplace-hm6w2" Nov 22 11:43:51 crc kubenswrapper[4926]: I1122 11:43:51.703385 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4549b3af-8a8d-4faa-8aaa-3b23866fc11b-catalog-content\") pod \"redhat-marketplace-hm6w2\" (UID: \"4549b3af-8a8d-4faa-8aaa-3b23866fc11b\") " pod="openshift-marketplace/redhat-marketplace-hm6w2" Nov 22 11:43:51 crc kubenswrapper[4926]: I1122 11:43:51.723220 4926 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-whzhg\" (UniqueName: \"kubernetes.io/projected/4549b3af-8a8d-4faa-8aaa-3b23866fc11b-kube-api-access-whzhg\") pod \"redhat-marketplace-hm6w2\" (UID: \"4549b3af-8a8d-4faa-8aaa-3b23866fc11b\") " pod="openshift-marketplace/redhat-marketplace-hm6w2" Nov 22 11:43:51 crc kubenswrapper[4926]: I1122 11:43:51.897587 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-hm6w2" Nov 22 11:43:52 crc kubenswrapper[4926]: I1122 11:43:52.347853 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-hm6w2"] Nov 22 11:43:52 crc kubenswrapper[4926]: W1122 11:43:52.358026 4926 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod4549b3af_8a8d_4faa_8aaa_3b23866fc11b.slice/crio-89efeb962549acacea21a8395dac23fa3c9c09a2145f535a7f2712e5ca78f870 WatchSource:0}: Error finding container 89efeb962549acacea21a8395dac23fa3c9c09a2145f535a7f2712e5ca78f870: Status 404 returned error can't find the container with id 89efeb962549acacea21a8395dac23fa3c9c09a2145f535a7f2712e5ca78f870 Nov 22 11:43:53 crc kubenswrapper[4926]: I1122 11:43:53.225486 4926 generic.go:334] "Generic (PLEG): container finished" podID="4549b3af-8a8d-4faa-8aaa-3b23866fc11b" containerID="d5ac79ec88018ed8e76b9dcdff6898912e6c03f7e826d8f8a8954620df5a7a35" exitCode=0 Nov 22 11:43:53 crc kubenswrapper[4926]: I1122 11:43:53.225577 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-hm6w2" event={"ID":"4549b3af-8a8d-4faa-8aaa-3b23866fc11b","Type":"ContainerDied","Data":"d5ac79ec88018ed8e76b9dcdff6898912e6c03f7e826d8f8a8954620df5a7a35"} Nov 22 11:43:53 crc kubenswrapper[4926]: I1122 11:43:53.226151 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-hm6w2" event={"ID":"4549b3af-8a8d-4faa-8aaa-3b23866fc11b","Type":"ContainerStarted","Data":"89efeb962549acacea21a8395dac23fa3c9c09a2145f535a7f2712e5ca78f870"} Nov 22 11:43:53 crc kubenswrapper[4926]: I1122 11:43:53.582107 4926 scope.go:117] "RemoveContainer" containerID="23214392718d6cb9dbc33ab7d4275deb5093396ce41b71fae35e5d84eda8c403" Nov 22 11:43:53 crc kubenswrapper[4926]: E1122 11:43:53.582368 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xr9nd_openshift-machine-config-operator(d4977b14-85c3-4141-9b15-1768f09e8d27)\"" pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" podUID="d4977b14-85c3-4141-9b15-1768f09e8d27" Nov 22 11:43:54 crc kubenswrapper[4926]: I1122 11:43:54.242312 4926 generic.go:334] "Generic (PLEG): container finished" podID="4549b3af-8a8d-4faa-8aaa-3b23866fc11b" containerID="d91e07cd7c93c96c36984208b3a4e6952985ba2fe5f82d5a31bdf996ad0e50b4" exitCode=0 Nov 22 11:43:54 crc kubenswrapper[4926]: I1122 11:43:54.242385 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-hm6w2" event={"ID":"4549b3af-8a8d-4faa-8aaa-3b23866fc11b","Type":"ContainerDied","Data":"d91e07cd7c93c96c36984208b3a4e6952985ba2fe5f82d5a31bdf996ad0e50b4"} Nov 22 11:43:55 crc kubenswrapper[4926]: I1122 11:43:55.269570 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-hm6w2" 
event={"ID":"4549b3af-8a8d-4faa-8aaa-3b23866fc11b","Type":"ContainerStarted","Data":"c408e351672e7e916f2ac6addc47924516a73383929276b22520725c1280d4c6"} Nov 22 11:43:55 crc kubenswrapper[4926]: I1122 11:43:55.299978 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-hm6w2" podStartSLOduration=2.88691005 podStartE2EDuration="4.299952842s" podCreationTimestamp="2025-11-22 11:43:51 +0000 UTC" firstStartedPulling="2025-11-22 11:43:53.228392988 +0000 UTC m=+3853.529998275" lastFinishedPulling="2025-11-22 11:43:54.64143578 +0000 UTC m=+3854.943041067" observedRunningTime="2025-11-22 11:43:55.29637187 +0000 UTC m=+3855.597977167" watchObservedRunningTime="2025-11-22 11:43:55.299952842 +0000 UTC m=+3855.601558149" Nov 22 11:43:57 crc kubenswrapper[4926]: I1122 11:43:57.290218 4926 generic.go:334] "Generic (PLEG): container finished" podID="34cc1b7f-c8e3-4866-8493-b07cd980b8b7" containerID="5a09c359a681c3866cf3c0c6f46ed722a4042f35ed78e0d4ff79de97b232170f" exitCode=0 Nov 22 11:43:57 crc kubenswrapper[4926]: I1122 11:43:57.290281 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-kmzpd/crc-debug-lnfgf" event={"ID":"34cc1b7f-c8e3-4866-8493-b07cd980b8b7","Type":"ContainerDied","Data":"5a09c359a681c3866cf3c0c6f46ed722a4042f35ed78e0d4ff79de97b232170f"} Nov 22 11:43:58 crc kubenswrapper[4926]: I1122 11:43:58.409379 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-kmzpd/crc-debug-lnfgf" Nov 22 11:43:58 crc kubenswrapper[4926]: I1122 11:43:58.451579 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-kmzpd/crc-debug-lnfgf"] Nov 22 11:43:58 crc kubenswrapper[4926]: I1122 11:43:58.462605 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-kmzpd/crc-debug-lnfgf"] Nov 22 11:43:58 crc kubenswrapper[4926]: I1122 11:43:58.559672 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-r7mzg\" (UniqueName: \"kubernetes.io/projected/34cc1b7f-c8e3-4866-8493-b07cd980b8b7-kube-api-access-r7mzg\") pod \"34cc1b7f-c8e3-4866-8493-b07cd980b8b7\" (UID: \"34cc1b7f-c8e3-4866-8493-b07cd980b8b7\") " Nov 22 11:43:58 crc kubenswrapper[4926]: I1122 11:43:58.559805 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/34cc1b7f-c8e3-4866-8493-b07cd980b8b7-host\") pod \"34cc1b7f-c8e3-4866-8493-b07cd980b8b7\" (UID: \"34cc1b7f-c8e3-4866-8493-b07cd980b8b7\") " Nov 22 11:43:58 crc kubenswrapper[4926]: I1122 11:43:58.559930 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/34cc1b7f-c8e3-4866-8493-b07cd980b8b7-host" (OuterVolumeSpecName: "host") pod "34cc1b7f-c8e3-4866-8493-b07cd980b8b7" (UID: "34cc1b7f-c8e3-4866-8493-b07cd980b8b7"). InnerVolumeSpecName "host". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 22 11:43:58 crc kubenswrapper[4926]: I1122 11:43:58.560283 4926 reconciler_common.go:293] "Volume detached for volume \"host\" (UniqueName: \"kubernetes.io/host-path/34cc1b7f-c8e3-4866-8493-b07cd980b8b7-host\") on node \"crc\" DevicePath \"\"" Nov 22 11:43:58 crc kubenswrapper[4926]: I1122 11:43:58.565703 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/34cc1b7f-c8e3-4866-8493-b07cd980b8b7-kube-api-access-r7mzg" (OuterVolumeSpecName: "kube-api-access-r7mzg") pod "34cc1b7f-c8e3-4866-8493-b07cd980b8b7" (UID: "34cc1b7f-c8e3-4866-8493-b07cd980b8b7"). InnerVolumeSpecName "kube-api-access-r7mzg". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 11:43:58 crc kubenswrapper[4926]: I1122 11:43:58.596281 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="34cc1b7f-c8e3-4866-8493-b07cd980b8b7" path="/var/lib/kubelet/pods/34cc1b7f-c8e3-4866-8493-b07cd980b8b7/volumes" Nov 22 11:43:58 crc kubenswrapper[4926]: E1122 11:43:58.630453 4926 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod34cc1b7f_c8e3_4866_8493_b07cd980b8b7.slice\": RecentStats: unable to find data in memory cache]" Nov 22 11:43:58 crc kubenswrapper[4926]: I1122 11:43:58.661859 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-r7mzg\" (UniqueName: \"kubernetes.io/projected/34cc1b7f-c8e3-4866-8493-b07cd980b8b7-kube-api-access-r7mzg\") on node \"crc\" DevicePath \"\"" Nov 22 11:43:59 crc kubenswrapper[4926]: I1122 11:43:59.309720 4926 scope.go:117] "RemoveContainer" containerID="5a09c359a681c3866cf3c0c6f46ed722a4042f35ed78e0d4ff79de97b232170f" Nov 22 11:43:59 crc kubenswrapper[4926]: I1122 11:43:59.309741 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-kmzpd/crc-debug-lnfgf" Nov 22 11:43:59 crc kubenswrapper[4926]: I1122 11:43:59.718401 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-kmzpd/crc-debug-lg5h9"] Nov 22 11:43:59 crc kubenswrapper[4926]: E1122 11:43:59.719124 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="34cc1b7f-c8e3-4866-8493-b07cd980b8b7" containerName="container-00" Nov 22 11:43:59 crc kubenswrapper[4926]: I1122 11:43:59.719143 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="34cc1b7f-c8e3-4866-8493-b07cd980b8b7" containerName="container-00" Nov 22 11:43:59 crc kubenswrapper[4926]: I1122 11:43:59.719428 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="34cc1b7f-c8e3-4866-8493-b07cd980b8b7" containerName="container-00" Nov 22 11:43:59 crc kubenswrapper[4926]: I1122 11:43:59.721225 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-kmzpd/crc-debug-lg5h9" Nov 22 11:43:59 crc kubenswrapper[4926]: I1122 11:43:59.723198 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-must-gather-kmzpd"/"default-dockercfg-xvn2c" Nov 22 11:43:59 crc kubenswrapper[4926]: I1122 11:43:59.785046 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/aa901a50-3872-4318-aeb1-65ca0fcfc259-host\") pod \"crc-debug-lg5h9\" (UID: \"aa901a50-3872-4318-aeb1-65ca0fcfc259\") " pod="openshift-must-gather-kmzpd/crc-debug-lg5h9" Nov 22 11:43:59 crc kubenswrapper[4926]: I1122 11:43:59.785160 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4mhnh\" (UniqueName: \"kubernetes.io/projected/aa901a50-3872-4318-aeb1-65ca0fcfc259-kube-api-access-4mhnh\") pod \"crc-debug-lg5h9\" (UID: \"aa901a50-3872-4318-aeb1-65ca0fcfc259\") " pod="openshift-must-gather-kmzpd/crc-debug-lg5h9" Nov 22 11:43:59 crc kubenswrapper[4926]: I1122 11:43:59.886717 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/aa901a50-3872-4318-aeb1-65ca0fcfc259-host\") pod \"crc-debug-lg5h9\" (UID: \"aa901a50-3872-4318-aeb1-65ca0fcfc259\") " pod="openshift-must-gather-kmzpd/crc-debug-lg5h9" Nov 22 11:43:59 crc kubenswrapper[4926]: I1122 11:43:59.886845 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/aa901a50-3872-4318-aeb1-65ca0fcfc259-host\") pod \"crc-debug-lg5h9\" (UID: \"aa901a50-3872-4318-aeb1-65ca0fcfc259\") " pod="openshift-must-gather-kmzpd/crc-debug-lg5h9" Nov 22 11:43:59 crc kubenswrapper[4926]: I1122 11:43:59.886871 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4mhnh\" (UniqueName: \"kubernetes.io/projected/aa901a50-3872-4318-aeb1-65ca0fcfc259-kube-api-access-4mhnh\") pod \"crc-debug-lg5h9\" (UID: \"aa901a50-3872-4318-aeb1-65ca0fcfc259\") " pod="openshift-must-gather-kmzpd/crc-debug-lg5h9" Nov 22 11:43:59 crc kubenswrapper[4926]: I1122 11:43:59.909958 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4mhnh\" (UniqueName: \"kubernetes.io/projected/aa901a50-3872-4318-aeb1-65ca0fcfc259-kube-api-access-4mhnh\") pod \"crc-debug-lg5h9\" (UID: \"aa901a50-3872-4318-aeb1-65ca0fcfc259\") " pod="openshift-must-gather-kmzpd/crc-debug-lg5h9" Nov 22 11:44:00 crc kubenswrapper[4926]: I1122 11:44:00.041716 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-kmzpd/crc-debug-lg5h9" Nov 22 11:44:00 crc kubenswrapper[4926]: I1122 11:44:00.325742 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-kmzpd/crc-debug-lg5h9" event={"ID":"aa901a50-3872-4318-aeb1-65ca0fcfc259","Type":"ContainerStarted","Data":"c066f023896405651c314b9ca5922064ffb1c343753ad91c9b58bc94b20f096f"} Nov 22 11:44:00 crc kubenswrapper[4926]: I1122 11:44:00.326115 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-kmzpd/crc-debug-lg5h9" event={"ID":"aa901a50-3872-4318-aeb1-65ca0fcfc259","Type":"ContainerStarted","Data":"31855387dc95dbe5c5e64f412187c6c6233155c6dc75dcfe9c62cd56793825f1"} Nov 22 11:44:01 crc kubenswrapper[4926]: I1122 11:44:01.334921 4926 generic.go:334] "Generic (PLEG): container finished" podID="aa901a50-3872-4318-aeb1-65ca0fcfc259" containerID="c066f023896405651c314b9ca5922064ffb1c343753ad91c9b58bc94b20f096f" exitCode=0 Nov 22 11:44:01 crc kubenswrapper[4926]: I1122 11:44:01.335048 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-kmzpd/crc-debug-lg5h9" event={"ID":"aa901a50-3872-4318-aeb1-65ca0fcfc259","Type":"ContainerDied","Data":"c066f023896405651c314b9ca5922064ffb1c343753ad91c9b58bc94b20f096f"} Nov 22 11:44:01 crc kubenswrapper[4926]: I1122 11:44:01.898375 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-hm6w2" Nov 22 11:44:01 crc kubenswrapper[4926]: I1122 11:44:01.898416 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-hm6w2" Nov 22 11:44:01 crc kubenswrapper[4926]: I1122 11:44:01.993656 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-hm6w2" Nov 22 11:44:02 crc kubenswrapper[4926]: I1122 11:44:02.398986 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-hm6w2" Nov 22 11:44:02 crc kubenswrapper[4926]: I1122 11:44:02.467679 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-hm6w2"] Nov 22 11:44:02 crc kubenswrapper[4926]: I1122 11:44:02.481045 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-kmzpd/crc-debug-lg5h9" Nov 22 11:44:02 crc kubenswrapper[4926]: I1122 11:44:02.517253 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-kmzpd/crc-debug-lg5h9"] Nov 22 11:44:02 crc kubenswrapper[4926]: I1122 11:44:02.525870 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-kmzpd/crc-debug-lg5h9"] Nov 22 11:44:02 crc kubenswrapper[4926]: I1122 11:44:02.537318 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/aa901a50-3872-4318-aeb1-65ca0fcfc259-host\") pod \"aa901a50-3872-4318-aeb1-65ca0fcfc259\" (UID: \"aa901a50-3872-4318-aeb1-65ca0fcfc259\") " Nov 22 11:44:02 crc kubenswrapper[4926]: I1122 11:44:02.537453 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/aa901a50-3872-4318-aeb1-65ca0fcfc259-host" (OuterVolumeSpecName: "host") pod "aa901a50-3872-4318-aeb1-65ca0fcfc259" (UID: "aa901a50-3872-4318-aeb1-65ca0fcfc259"). InnerVolumeSpecName "host". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 22 11:44:02 crc kubenswrapper[4926]: I1122 11:44:02.537634 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4mhnh\" (UniqueName: \"kubernetes.io/projected/aa901a50-3872-4318-aeb1-65ca0fcfc259-kube-api-access-4mhnh\") pod \"aa901a50-3872-4318-aeb1-65ca0fcfc259\" (UID: \"aa901a50-3872-4318-aeb1-65ca0fcfc259\") " Nov 22 11:44:02 crc kubenswrapper[4926]: I1122 11:44:02.538095 4926 reconciler_common.go:293] "Volume detached for volume \"host\" (UniqueName: \"kubernetes.io/host-path/aa901a50-3872-4318-aeb1-65ca0fcfc259-host\") on node \"crc\" DevicePath \"\"" Nov 22 11:44:02 crc kubenswrapper[4926]: I1122 11:44:02.546498 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/aa901a50-3872-4318-aeb1-65ca0fcfc259-kube-api-access-4mhnh" (OuterVolumeSpecName: "kube-api-access-4mhnh") pod "aa901a50-3872-4318-aeb1-65ca0fcfc259" (UID: "aa901a50-3872-4318-aeb1-65ca0fcfc259"). InnerVolumeSpecName "kube-api-access-4mhnh". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 11:44:02 crc kubenswrapper[4926]: I1122 11:44:02.596736 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="aa901a50-3872-4318-aeb1-65ca0fcfc259" path="/var/lib/kubelet/pods/aa901a50-3872-4318-aeb1-65ca0fcfc259/volumes" Nov 22 11:44:02 crc kubenswrapper[4926]: I1122 11:44:02.639857 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4mhnh\" (UniqueName: \"kubernetes.io/projected/aa901a50-3872-4318-aeb1-65ca0fcfc259-kube-api-access-4mhnh\") on node \"crc\" DevicePath \"\"" Nov 22 11:44:03 crc kubenswrapper[4926]: I1122 11:44:03.358699 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-kmzpd/crc-debug-lg5h9" Nov 22 11:44:03 crc kubenswrapper[4926]: I1122 11:44:03.358781 4926 scope.go:117] "RemoveContainer" containerID="c066f023896405651c314b9ca5922064ffb1c343753ad91c9b58bc94b20f096f" Nov 22 11:44:03 crc kubenswrapper[4926]: I1122 11:44:03.754879 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-kmzpd/crc-debug-wbmbl"] Nov 22 11:44:03 crc kubenswrapper[4926]: E1122 11:44:03.755716 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="aa901a50-3872-4318-aeb1-65ca0fcfc259" containerName="container-00" Nov 22 11:44:03 crc kubenswrapper[4926]: I1122 11:44:03.755737 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="aa901a50-3872-4318-aeb1-65ca0fcfc259" containerName="container-00" Nov 22 11:44:03 crc kubenswrapper[4926]: I1122 11:44:03.756059 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="aa901a50-3872-4318-aeb1-65ca0fcfc259" containerName="container-00" Nov 22 11:44:03 crc kubenswrapper[4926]: I1122 11:44:03.756843 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-kmzpd/crc-debug-wbmbl" Nov 22 11:44:03 crc kubenswrapper[4926]: I1122 11:44:03.760588 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-must-gather-kmzpd"/"default-dockercfg-xvn2c" Nov 22 11:44:03 crc kubenswrapper[4926]: I1122 11:44:03.864042 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/85d44158-a202-4921-a243-c79e9219275e-host\") pod \"crc-debug-wbmbl\" (UID: \"85d44158-a202-4921-a243-c79e9219275e\") " pod="openshift-must-gather-kmzpd/crc-debug-wbmbl" Nov 22 11:44:03 crc kubenswrapper[4926]: I1122 11:44:03.864275 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8npzx\" (UniqueName: \"kubernetes.io/projected/85d44158-a202-4921-a243-c79e9219275e-kube-api-access-8npzx\") pod \"crc-debug-wbmbl\" (UID: \"85d44158-a202-4921-a243-c79e9219275e\") " pod="openshift-must-gather-kmzpd/crc-debug-wbmbl" Nov 22 11:44:03 crc kubenswrapper[4926]: I1122 11:44:03.967391 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/85d44158-a202-4921-a243-c79e9219275e-host\") pod \"crc-debug-wbmbl\" (UID: \"85d44158-a202-4921-a243-c79e9219275e\") " pod="openshift-must-gather-kmzpd/crc-debug-wbmbl" Nov 22 11:44:03 crc kubenswrapper[4926]: I1122 11:44:03.967539 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/85d44158-a202-4921-a243-c79e9219275e-host\") pod \"crc-debug-wbmbl\" (UID: \"85d44158-a202-4921-a243-c79e9219275e\") " pod="openshift-must-gather-kmzpd/crc-debug-wbmbl" Nov 22 11:44:03 crc kubenswrapper[4926]: I1122 11:44:03.967601 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8npzx\" (UniqueName: \"kubernetes.io/projected/85d44158-a202-4921-a243-c79e9219275e-kube-api-access-8npzx\") pod \"crc-debug-wbmbl\" (UID: \"85d44158-a202-4921-a243-c79e9219275e\") " pod="openshift-must-gather-kmzpd/crc-debug-wbmbl" Nov 22 11:44:04 crc kubenswrapper[4926]: I1122 11:44:04.001053 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8npzx\" (UniqueName: \"kubernetes.io/projected/85d44158-a202-4921-a243-c79e9219275e-kube-api-access-8npzx\") pod \"crc-debug-wbmbl\" (UID: \"85d44158-a202-4921-a243-c79e9219275e\") " pod="openshift-must-gather-kmzpd/crc-debug-wbmbl" Nov 22 11:44:04 crc kubenswrapper[4926]: I1122 11:44:04.087476 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-kmzpd/crc-debug-wbmbl" Nov 22 11:44:04 crc kubenswrapper[4926]: W1122 11:44:04.147292 4926 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod85d44158_a202_4921_a243_c79e9219275e.slice/crio-a91b5ef8251980793f9d0ae2f21afeb706aac3db28ee9bdc589004f8045f6bfd WatchSource:0}: Error finding container a91b5ef8251980793f9d0ae2f21afeb706aac3db28ee9bdc589004f8045f6bfd: Status 404 returned error can't find the container with id a91b5ef8251980793f9d0ae2f21afeb706aac3db28ee9bdc589004f8045f6bfd Nov 22 11:44:04 crc kubenswrapper[4926]: I1122 11:44:04.369603 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-kmzpd/crc-debug-wbmbl" event={"ID":"85d44158-a202-4921-a243-c79e9219275e","Type":"ContainerStarted","Data":"a91b5ef8251980793f9d0ae2f21afeb706aac3db28ee9bdc589004f8045f6bfd"} Nov 22 11:44:04 crc kubenswrapper[4926]: I1122 11:44:04.371459 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-hm6w2" podUID="4549b3af-8a8d-4faa-8aaa-3b23866fc11b" containerName="registry-server" containerID="cri-o://c408e351672e7e916f2ac6addc47924516a73383929276b22520725c1280d4c6" gracePeriod=2 Nov 22 11:44:04 crc kubenswrapper[4926]: I1122 11:44:04.846566 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-hm6w2" Nov 22 11:44:05 crc kubenswrapper[4926]: I1122 11:44:05.005729 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4549b3af-8a8d-4faa-8aaa-3b23866fc11b-utilities\") pod \"4549b3af-8a8d-4faa-8aaa-3b23866fc11b\" (UID: \"4549b3af-8a8d-4faa-8aaa-3b23866fc11b\") " Nov 22 11:44:05 crc kubenswrapper[4926]: I1122 11:44:05.005817 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4549b3af-8a8d-4faa-8aaa-3b23866fc11b-catalog-content\") pod \"4549b3af-8a8d-4faa-8aaa-3b23866fc11b\" (UID: \"4549b3af-8a8d-4faa-8aaa-3b23866fc11b\") " Nov 22 11:44:05 crc kubenswrapper[4926]: I1122 11:44:05.005929 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-whzhg\" (UniqueName: \"kubernetes.io/projected/4549b3af-8a8d-4faa-8aaa-3b23866fc11b-kube-api-access-whzhg\") pod \"4549b3af-8a8d-4faa-8aaa-3b23866fc11b\" (UID: \"4549b3af-8a8d-4faa-8aaa-3b23866fc11b\") " Nov 22 11:44:05 crc kubenswrapper[4926]: I1122 11:44:05.018519 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4549b3af-8a8d-4faa-8aaa-3b23866fc11b-utilities" (OuterVolumeSpecName: "utilities") pod "4549b3af-8a8d-4faa-8aaa-3b23866fc11b" (UID: "4549b3af-8a8d-4faa-8aaa-3b23866fc11b"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 11:44:05 crc kubenswrapper[4926]: I1122 11:44:05.019766 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4549b3af-8a8d-4faa-8aaa-3b23866fc11b-kube-api-access-whzhg" (OuterVolumeSpecName: "kube-api-access-whzhg") pod "4549b3af-8a8d-4faa-8aaa-3b23866fc11b" (UID: "4549b3af-8a8d-4faa-8aaa-3b23866fc11b"). InnerVolumeSpecName "kube-api-access-whzhg". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 11:44:05 crc kubenswrapper[4926]: I1122 11:44:05.027983 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4549b3af-8a8d-4faa-8aaa-3b23866fc11b-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "4549b3af-8a8d-4faa-8aaa-3b23866fc11b" (UID: "4549b3af-8a8d-4faa-8aaa-3b23866fc11b"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 11:44:05 crc kubenswrapper[4926]: I1122 11:44:05.108047 4926 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4549b3af-8a8d-4faa-8aaa-3b23866fc11b-utilities\") on node \"crc\" DevicePath \"\"" Nov 22 11:44:05 crc kubenswrapper[4926]: I1122 11:44:05.108078 4926 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4549b3af-8a8d-4faa-8aaa-3b23866fc11b-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 22 11:44:05 crc kubenswrapper[4926]: I1122 11:44:05.108091 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-whzhg\" (UniqueName: \"kubernetes.io/projected/4549b3af-8a8d-4faa-8aaa-3b23866fc11b-kube-api-access-whzhg\") on node \"crc\" DevicePath \"\"" Nov 22 11:44:05 crc kubenswrapper[4926]: I1122 11:44:05.381449 4926 generic.go:334] "Generic (PLEG): container finished" podID="4549b3af-8a8d-4faa-8aaa-3b23866fc11b" containerID="c408e351672e7e916f2ac6addc47924516a73383929276b22520725c1280d4c6" exitCode=0 Nov 22 11:44:05 crc kubenswrapper[4926]: I1122 11:44:05.381509 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-hm6w2" Nov 22 11:44:05 crc kubenswrapper[4926]: I1122 11:44:05.381518 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-hm6w2" event={"ID":"4549b3af-8a8d-4faa-8aaa-3b23866fc11b","Type":"ContainerDied","Data":"c408e351672e7e916f2ac6addc47924516a73383929276b22520725c1280d4c6"} Nov 22 11:44:05 crc kubenswrapper[4926]: I1122 11:44:05.381652 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-hm6w2" event={"ID":"4549b3af-8a8d-4faa-8aaa-3b23866fc11b","Type":"ContainerDied","Data":"89efeb962549acacea21a8395dac23fa3c9c09a2145f535a7f2712e5ca78f870"} Nov 22 11:44:05 crc kubenswrapper[4926]: I1122 11:44:05.381704 4926 scope.go:117] "RemoveContainer" containerID="c408e351672e7e916f2ac6addc47924516a73383929276b22520725c1280d4c6" Nov 22 11:44:05 crc kubenswrapper[4926]: I1122 11:44:05.384773 4926 generic.go:334] "Generic (PLEG): container finished" podID="85d44158-a202-4921-a243-c79e9219275e" containerID="f19c6ce07eb546d460d8f5b419dfc2459c58d8295a2f29066df18f0a30c3036d" exitCode=0 Nov 22 11:44:05 crc kubenswrapper[4926]: I1122 11:44:05.384811 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-kmzpd/crc-debug-wbmbl" event={"ID":"85d44158-a202-4921-a243-c79e9219275e","Type":"ContainerDied","Data":"f19c6ce07eb546d460d8f5b419dfc2459c58d8295a2f29066df18f0a30c3036d"} Nov 22 11:44:05 crc kubenswrapper[4926]: I1122 11:44:05.418358 4926 scope.go:117] "RemoveContainer" containerID="d91e07cd7c93c96c36984208b3a4e6952985ba2fe5f82d5a31bdf996ad0e50b4" Nov 22 11:44:05 crc kubenswrapper[4926]: I1122 11:44:05.446959 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-hm6w2"] Nov 22 11:44:05 crc kubenswrapper[4926]: I1122 
11:44:05.448725 4926 scope.go:117] "RemoveContainer" containerID="d5ac79ec88018ed8e76b9dcdff6898912e6c03f7e826d8f8a8954620df5a7a35" Nov 22 11:44:05 crc kubenswrapper[4926]: I1122 11:44:05.452957 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-hm6w2"] Nov 22 11:44:05 crc kubenswrapper[4926]: I1122 11:44:05.460936 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-kmzpd/crc-debug-wbmbl"] Nov 22 11:44:05 crc kubenswrapper[4926]: I1122 11:44:05.468163 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-kmzpd/crc-debug-wbmbl"] Nov 22 11:44:05 crc kubenswrapper[4926]: I1122 11:44:05.509825 4926 scope.go:117] "RemoveContainer" containerID="c408e351672e7e916f2ac6addc47924516a73383929276b22520725c1280d4c6" Nov 22 11:44:05 crc kubenswrapper[4926]: E1122 11:44:05.510248 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c408e351672e7e916f2ac6addc47924516a73383929276b22520725c1280d4c6\": container with ID starting with c408e351672e7e916f2ac6addc47924516a73383929276b22520725c1280d4c6 not found: ID does not exist" containerID="c408e351672e7e916f2ac6addc47924516a73383929276b22520725c1280d4c6" Nov 22 11:44:05 crc kubenswrapper[4926]: I1122 11:44:05.510277 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c408e351672e7e916f2ac6addc47924516a73383929276b22520725c1280d4c6"} err="failed to get container status \"c408e351672e7e916f2ac6addc47924516a73383929276b22520725c1280d4c6\": rpc error: code = NotFound desc = could not find container \"c408e351672e7e916f2ac6addc47924516a73383929276b22520725c1280d4c6\": container with ID starting with c408e351672e7e916f2ac6addc47924516a73383929276b22520725c1280d4c6 not found: ID does not exist" Nov 22 11:44:05 crc kubenswrapper[4926]: I1122 11:44:05.510297 4926 scope.go:117] "RemoveContainer" containerID="d91e07cd7c93c96c36984208b3a4e6952985ba2fe5f82d5a31bdf996ad0e50b4" Nov 22 11:44:05 crc kubenswrapper[4926]: E1122 11:44:05.510619 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d91e07cd7c93c96c36984208b3a4e6952985ba2fe5f82d5a31bdf996ad0e50b4\": container with ID starting with d91e07cd7c93c96c36984208b3a4e6952985ba2fe5f82d5a31bdf996ad0e50b4 not found: ID does not exist" containerID="d91e07cd7c93c96c36984208b3a4e6952985ba2fe5f82d5a31bdf996ad0e50b4" Nov 22 11:44:05 crc kubenswrapper[4926]: I1122 11:44:05.510641 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d91e07cd7c93c96c36984208b3a4e6952985ba2fe5f82d5a31bdf996ad0e50b4"} err="failed to get container status \"d91e07cd7c93c96c36984208b3a4e6952985ba2fe5f82d5a31bdf996ad0e50b4\": rpc error: code = NotFound desc = could not find container \"d91e07cd7c93c96c36984208b3a4e6952985ba2fe5f82d5a31bdf996ad0e50b4\": container with ID starting with d91e07cd7c93c96c36984208b3a4e6952985ba2fe5f82d5a31bdf996ad0e50b4 not found: ID does not exist" Nov 22 11:44:05 crc kubenswrapper[4926]: I1122 11:44:05.510653 4926 scope.go:117] "RemoveContainer" containerID="d5ac79ec88018ed8e76b9dcdff6898912e6c03f7e826d8f8a8954620df5a7a35" Nov 22 11:44:05 crc kubenswrapper[4926]: E1122 11:44:05.510923 4926 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d5ac79ec88018ed8e76b9dcdff6898912e6c03f7e826d8f8a8954620df5a7a35\": 
container with ID starting with d5ac79ec88018ed8e76b9dcdff6898912e6c03f7e826d8f8a8954620df5a7a35 not found: ID does not exist" containerID="d5ac79ec88018ed8e76b9dcdff6898912e6c03f7e826d8f8a8954620df5a7a35" Nov 22 11:44:05 crc kubenswrapper[4926]: I1122 11:44:05.510966 4926 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d5ac79ec88018ed8e76b9dcdff6898912e6c03f7e826d8f8a8954620df5a7a35"} err="failed to get container status \"d5ac79ec88018ed8e76b9dcdff6898912e6c03f7e826d8f8a8954620df5a7a35\": rpc error: code = NotFound desc = could not find container \"d5ac79ec88018ed8e76b9dcdff6898912e6c03f7e826d8f8a8954620df5a7a35\": container with ID starting with d5ac79ec88018ed8e76b9dcdff6898912e6c03f7e826d8f8a8954620df5a7a35 not found: ID does not exist" Nov 22 11:44:05 crc kubenswrapper[4926]: I1122 11:44:05.582164 4926 scope.go:117] "RemoveContainer" containerID="23214392718d6cb9dbc33ab7d4275deb5093396ce41b71fae35e5d84eda8c403" Nov 22 11:44:05 crc kubenswrapper[4926]: E1122 11:44:05.582370 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xr9nd_openshift-machine-config-operator(d4977b14-85c3-4141-9b15-1768f09e8d27)\"" pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" podUID="d4977b14-85c3-4141-9b15-1768f09e8d27" Nov 22 11:44:06 crc kubenswrapper[4926]: I1122 11:44:06.483228 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-kmzpd/crc-debug-wbmbl" Nov 22 11:44:06 crc kubenswrapper[4926]: I1122 11:44:06.594534 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4549b3af-8a8d-4faa-8aaa-3b23866fc11b" path="/var/lib/kubelet/pods/4549b3af-8a8d-4faa-8aaa-3b23866fc11b/volumes" Nov 22 11:44:06 crc kubenswrapper[4926]: I1122 11:44:06.639704 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8npzx\" (UniqueName: \"kubernetes.io/projected/85d44158-a202-4921-a243-c79e9219275e-kube-api-access-8npzx\") pod \"85d44158-a202-4921-a243-c79e9219275e\" (UID: \"85d44158-a202-4921-a243-c79e9219275e\") " Nov 22 11:44:06 crc kubenswrapper[4926]: I1122 11:44:06.640134 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/85d44158-a202-4921-a243-c79e9219275e-host\") pod \"85d44158-a202-4921-a243-c79e9219275e\" (UID: \"85d44158-a202-4921-a243-c79e9219275e\") " Nov 22 11:44:06 crc kubenswrapper[4926]: I1122 11:44:06.640270 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/85d44158-a202-4921-a243-c79e9219275e-host" (OuterVolumeSpecName: "host") pod "85d44158-a202-4921-a243-c79e9219275e" (UID: "85d44158-a202-4921-a243-c79e9219275e"). InnerVolumeSpecName "host". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 22 11:44:06 crc kubenswrapper[4926]: I1122 11:44:06.641088 4926 reconciler_common.go:293] "Volume detached for volume \"host\" (UniqueName: \"kubernetes.io/host-path/85d44158-a202-4921-a243-c79e9219275e-host\") on node \"crc\" DevicePath \"\"" Nov 22 11:44:06 crc kubenswrapper[4926]: I1122 11:44:06.645569 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/85d44158-a202-4921-a243-c79e9219275e-kube-api-access-8npzx" (OuterVolumeSpecName: "kube-api-access-8npzx") pod "85d44158-a202-4921-a243-c79e9219275e" (UID: "85d44158-a202-4921-a243-c79e9219275e"). InnerVolumeSpecName "kube-api-access-8npzx". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 11:44:06 crc kubenswrapper[4926]: I1122 11:44:06.742622 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8npzx\" (UniqueName: \"kubernetes.io/projected/85d44158-a202-4921-a243-c79e9219275e-kube-api-access-8npzx\") on node \"crc\" DevicePath \"\"" Nov 22 11:44:07 crc kubenswrapper[4926]: I1122 11:44:07.403756 4926 scope.go:117] "RemoveContainer" containerID="f19c6ce07eb546d460d8f5b419dfc2459c58d8295a2f29066df18f0a30c3036d" Nov 22 11:44:07 crc kubenswrapper[4926]: I1122 11:44:07.403783 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-kmzpd/crc-debug-wbmbl" Nov 22 11:44:08 crc kubenswrapper[4926]: I1122 11:44:08.590439 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="85d44158-a202-4921-a243-c79e9219275e" path="/var/lib/kubelet/pods/85d44158-a202-4921-a243-c79e9219275e/volumes" Nov 22 11:44:18 crc kubenswrapper[4926]: I1122 11:44:18.582803 4926 scope.go:117] "RemoveContainer" containerID="23214392718d6cb9dbc33ab7d4275deb5093396ce41b71fae35e5d84eda8c403" Nov 22 11:44:18 crc kubenswrapper[4926]: E1122 11:44:18.585627 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xr9nd_openshift-machine-config-operator(d4977b14-85c3-4141-9b15-1768f09e8d27)\"" pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" podUID="d4977b14-85c3-4141-9b15-1768f09e8d27" Nov 22 11:44:28 crc kubenswrapper[4926]: I1122 11:44:28.684714 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-api-d84d6cb4b-w4kcl_d0482fa7-3e3e-4c94-8bb4-76e4aa4f1d7b/barbican-api/0.log" Nov 22 11:44:28 crc kubenswrapper[4926]: I1122 11:44:28.888116 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-api-d84d6cb4b-w4kcl_d0482fa7-3e3e-4c94-8bb4-76e4aa4f1d7b/barbican-api-log/0.log" Nov 22 11:44:28 crc kubenswrapper[4926]: I1122 11:44:28.965095 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-keystone-listener-5b6fb59ff8-cgr6h_5659fc64-a862-4d05-989e-4e667a4bb792/barbican-keystone-listener/0.log" Nov 22 11:44:28 crc kubenswrapper[4926]: I1122 11:44:28.980834 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-keystone-listener-5b6fb59ff8-cgr6h_5659fc64-a862-4d05-989e-4e667a4bb792/barbican-keystone-listener-log/0.log" Nov 22 11:44:29 crc kubenswrapper[4926]: I1122 11:44:29.105994 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-worker-6c6596fd55-5fshh_58c72eaf-f8f2-4333-8057-a9237457d73c/barbican-worker/0.log" Nov 22 11:44:29 crc 
kubenswrapper[4926]: I1122 11:44:29.144093 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-worker-6c6596fd55-5fshh_58c72eaf-f8f2-4333-8057-a9237457d73c/barbican-worker-log/0.log" Nov 22 11:44:29 crc kubenswrapper[4926]: I1122 11:44:29.298663 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_bootstrap-edpm-deployment-openstack-edpm-ipam-ch87n_9189297a-e5e2-47b3-9cf0-ac932c80f3bb/bootstrap-edpm-deployment-openstack-edpm-ipam/0.log" Nov 22 11:44:29 crc kubenswrapper[4926]: I1122 11:44:29.354790 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_5e5f48cd-d405-4431-8ab1-de058f7c0f52/ceilometer-central-agent/0.log" Nov 22 11:44:29 crc kubenswrapper[4926]: I1122 11:44:29.446935 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_5e5f48cd-d405-4431-8ab1-de058f7c0f52/ceilometer-notification-agent/0.log" Nov 22 11:44:29 crc kubenswrapper[4926]: I1122 11:44:29.524414 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_5e5f48cd-d405-4431-8ab1-de058f7c0f52/proxy-httpd/0.log" Nov 22 11:44:29 crc kubenswrapper[4926]: I1122 11:44:29.526710 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_5e5f48cd-d405-4431-8ab1-de058f7c0f52/sg-core/0.log" Nov 22 11:44:29 crc kubenswrapper[4926]: I1122 11:44:29.656743 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-api-0_8232e2d5-3714-47a4-9739-2e370a17300b/cinder-api/0.log" Nov 22 11:44:29 crc kubenswrapper[4926]: I1122 11:44:29.717189 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-api-0_8232e2d5-3714-47a4-9739-2e370a17300b/cinder-api-log/0.log" Nov 22 11:44:29 crc kubenswrapper[4926]: I1122 11:44:29.888246 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-scheduler-0_e8c6c748-bba9-4298-b0de-745cd26ccec4/probe/0.log" Nov 22 11:44:29 crc kubenswrapper[4926]: I1122 11:44:29.926272 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-scheduler-0_e8c6c748-bba9-4298-b0de-745cd26ccec4/cinder-scheduler/0.log" Nov 22 11:44:30 crc kubenswrapper[4926]: I1122 11:44:30.014388 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_configure-network-edpm-deployment-openstack-edpm-ipam-76jbg_ab012855-82a0-4f87-97a7-e3c2d1490dda/configure-network-edpm-deployment-openstack-edpm-ipam/0.log" Nov 22 11:44:30 crc kubenswrapper[4926]: I1122 11:44:30.143620 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_configure-os-edpm-deployment-openstack-edpm-ipam-mk5df_e048ed0c-3971-4ed9-ba3a-ea97d4cd82fb/configure-os-edpm-deployment-openstack-edpm-ipam/0.log" Nov 22 11:44:30 crc kubenswrapper[4926]: I1122 11:44:30.177918 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_dnsmasq-dns-8c6f6df99-m2jnh_17f914e9-40ef-4428-817c-6f72279f844f/init/0.log" Nov 22 11:44:30 crc kubenswrapper[4926]: I1122 11:44:30.367860 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_dnsmasq-dns-8c6f6df99-m2jnh_17f914e9-40ef-4428-817c-6f72279f844f/init/0.log" Nov 22 11:44:30 crc kubenswrapper[4926]: I1122 11:44:30.395501 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_dnsmasq-dns-8c6f6df99-m2jnh_17f914e9-40ef-4428-817c-6f72279f844f/dnsmasq-dns/0.log" Nov 22 11:44:30 crc kubenswrapper[4926]: I1122 11:44:30.412394 4926 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack_download-cache-edpm-deployment-openstack-edpm-ipam-7v496_fae080f1-2e5d-463a-ae8e-0c29025a62a3/download-cache-edpm-deployment-openstack-edpm-ipam/0.log" Nov 22 11:44:30 crc kubenswrapper[4926]: I1122 11:44:30.574380 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-external-api-0_699ad142-80cd-4ee2-86ca-87c22cc7f39b/glance-log/0.log" Nov 22 11:44:30 crc kubenswrapper[4926]: I1122 11:44:30.615183 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-external-api-0_699ad142-80cd-4ee2-86ca-87c22cc7f39b/glance-httpd/0.log" Nov 22 11:44:30 crc kubenswrapper[4926]: I1122 11:44:30.771635 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-internal-api-0_eaf541eb-314b-4f78-bdcc-66f5b43b0ed5/glance-httpd/0.log" Nov 22 11:44:30 crc kubenswrapper[4926]: I1122 11:44:30.796261 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-internal-api-0_eaf541eb-314b-4f78-bdcc-66f5b43b0ed5/glance-log/0.log" Nov 22 11:44:30 crc kubenswrapper[4926]: I1122 11:44:30.961587 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_horizon-86dd5d599b-jndzq_a7c08c13-5c9c-42ac-8fdc-e651c26d97fc/horizon/0.log" Nov 22 11:44:31 crc kubenswrapper[4926]: I1122 11:44:31.127332 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_install-certs-edpm-deployment-openstack-edpm-ipam-gqff6_f456f1f9-7676-4426-810c-6057111ed942/install-certs-edpm-deployment-openstack-edpm-ipam/0.log" Nov 22 11:44:31 crc kubenswrapper[4926]: I1122 11:44:31.277432 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_install-os-edpm-deployment-openstack-edpm-ipam-jv8ww_4cd61881-efff-46ae-a9b8-ba641538d8e1/install-os-edpm-deployment-openstack-edpm-ipam/0.log" Nov 22 11:44:31 crc kubenswrapper[4926]: I1122 11:44:31.296530 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_horizon-86dd5d599b-jndzq_a7c08c13-5c9c-42ac-8fdc-e651c26d97fc/horizon-log/0.log" Nov 22 11:44:31 crc kubenswrapper[4926]: I1122 11:44:31.488332 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_keystone-cron-29396821-jtvch_a8024291-de1f-49c8-bac5-b4d37978639d/keystone-cron/0.log" Nov 22 11:44:31 crc kubenswrapper[4926]: I1122 11:44:31.572076 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_keystone-68769dd845-84s2z_3c8571ff-d236-4cc6-aebe-ffa8be3ef604/keystone-api/0.log" Nov 22 11:44:31 crc kubenswrapper[4926]: I1122 11:44:31.581965 4926 scope.go:117] "RemoveContainer" containerID="23214392718d6cb9dbc33ab7d4275deb5093396ce41b71fae35e5d84eda8c403" Nov 22 11:44:31 crc kubenswrapper[4926]: E1122 11:44:31.582198 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xr9nd_openshift-machine-config-operator(d4977b14-85c3-4141-9b15-1768f09e8d27)\"" pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" podUID="d4977b14-85c3-4141-9b15-1768f09e8d27" Nov 22 11:44:31 crc kubenswrapper[4926]: I1122 11:44:31.710171 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_kube-state-metrics-0_a5fc01d6-133f-4899-926b-3e4ff8c68f0b/kube-state-metrics/0.log" Nov 22 11:44:31 crc kubenswrapper[4926]: I1122 11:44:31.747229 4926 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack_libvirt-edpm-deployment-openstack-edpm-ipam-s4zsl_64e25bf4-8746-413f-a28b-264ddfb9feff/libvirt-edpm-deployment-openstack-edpm-ipam/0.log" Nov 22 11:44:32 crc kubenswrapper[4926]: I1122 11:44:32.076072 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-5f7c4dcf85-jl8kd_3260200f-bc21-4521-9a62-2f67ab26f0df/neutron-api/0.log" Nov 22 11:44:32 crc kubenswrapper[4926]: I1122 11:44:32.105022 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-5f7c4dcf85-jl8kd_3260200f-bc21-4521-9a62-2f67ab26f0df/neutron-httpd/0.log" Nov 22 11:44:32 crc kubenswrapper[4926]: I1122 11:44:32.296277 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-metadata-edpm-deployment-openstack-edpm-ipam-f5tdc_e65a3423-36b6-48c5-b170-989f64801105/neutron-metadata-edpm-deployment-openstack-edpm-ipam/0.log" Nov 22 11:44:32 crc kubenswrapper[4926]: I1122 11:44:32.676642 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-api-0_95e7c80b-edf7-42be-892c-11557c816271/nova-api-log/0.log" Nov 22 11:44:32 crc kubenswrapper[4926]: I1122 11:44:32.821712 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-cell0-conductor-0_cb0852ee-dc75-43ee-88ec-7343197eca5f/nova-cell0-conductor-conductor/0.log" Nov 22 11:44:33 crc kubenswrapper[4926]: I1122 11:44:33.069668 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-cell1-conductor-0_cea60836-1c25-4c6c-8f9e-e64ab97d459a/nova-cell1-conductor-conductor/0.log" Nov 22 11:44:33 crc kubenswrapper[4926]: I1122 11:44:33.166317 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-api-0_95e7c80b-edf7-42be-892c-11557c816271/nova-api-api/0.log" Nov 22 11:44:33 crc kubenswrapper[4926]: I1122 11:44:33.188052 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-cell1-novncproxy-0_ccc7d0f8-4767-44f3-8dfa-d83b48fc23a2/nova-cell1-novncproxy-novncproxy/0.log" Nov 22 11:44:33 crc kubenswrapper[4926]: I1122 11:44:33.316610 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-edpm-deployment-openstack-edpm-ipam-554th_61e72bc5-b152-4df1-95ee-bb47a81514ff/nova-edpm-deployment-openstack-edpm-ipam/0.log" Nov 22 11:44:33 crc kubenswrapper[4926]: I1122 11:44:33.482614 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-metadata-0_468e2351-8b2d-4e90-bf03-218570d63fd9/nova-metadata-log/0.log" Nov 22 11:44:33 crc kubenswrapper[4926]: I1122 11:44:33.793540 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-cell1-galera-0_01026c46-6589-4761-80f4-8bb210d71fd9/mysql-bootstrap/0.log" Nov 22 11:44:33 crc kubenswrapper[4926]: I1122 11:44:33.918527 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-scheduler-0_360b8f0a-6a7b-4772-839f-cab107433443/nova-scheduler-scheduler/0.log" Nov 22 11:44:34 crc kubenswrapper[4926]: I1122 11:44:34.033202 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-cell1-galera-0_01026c46-6589-4761-80f4-8bb210d71fd9/galera/0.log" Nov 22 11:44:34 crc kubenswrapper[4926]: I1122 11:44:34.033518 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-cell1-galera-0_01026c46-6589-4761-80f4-8bb210d71fd9/mysql-bootstrap/0.log" Nov 22 11:44:34 crc kubenswrapper[4926]: I1122 11:44:34.210067 4926 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack_openstack-galera-0_94fdd08c-2339-4d12-90bf-fbd407185f34/mysql-bootstrap/0.log" Nov 22 11:44:34 crc kubenswrapper[4926]: I1122 11:44:34.452407 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-galera-0_94fdd08c-2339-4d12-90bf-fbd407185f34/mysql-bootstrap/0.log" Nov 22 11:44:34 crc kubenswrapper[4926]: I1122 11:44:34.467520 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-galera-0_94fdd08c-2339-4d12-90bf-fbd407185f34/galera/0.log" Nov 22 11:44:34 crc kubenswrapper[4926]: I1122 11:44:34.631925 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstackclient_6c186926-85fd-4c52-9910-48a3c70ae9eb/openstackclient/0.log" Nov 22 11:44:34 crc kubenswrapper[4926]: I1122 11:44:34.684209 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-metrics-fvp8n_d2154a83-1eaa-44bc-ade8-754245e919b2/openstack-network-exporter/0.log" Nov 22 11:44:34 crc kubenswrapper[4926]: I1122 11:44:34.758950 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-metadata-0_468e2351-8b2d-4e90-bf03-218570d63fd9/nova-metadata-metadata/0.log" Nov 22 11:44:34 crc kubenswrapper[4926]: I1122 11:44:34.888445 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-hrnx7_c35b6e33-72ea-4631-8fb0-e21ed5b6b503/ovsdb-server-init/0.log" Nov 22 11:44:35 crc kubenswrapper[4926]: I1122 11:44:35.078008 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-hrnx7_c35b6e33-72ea-4631-8fb0-e21ed5b6b503/ovsdb-server-init/0.log" Nov 22 11:44:35 crc kubenswrapper[4926]: I1122 11:44:35.123143 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-hrnx7_c35b6e33-72ea-4631-8fb0-e21ed5b6b503/ovsdb-server/0.log" Nov 22 11:44:35 crc kubenswrapper[4926]: I1122 11:44:35.162390 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-hrnx7_c35b6e33-72ea-4631-8fb0-e21ed5b6b503/ovs-vswitchd/0.log" Nov 22 11:44:35 crc kubenswrapper[4926]: I1122 11:44:35.298454 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-pwfdl_631757e2-e40e-4cc6-a2a3-601c749669b2/ovn-controller/0.log" Nov 22 11:44:35 crc kubenswrapper[4926]: I1122 11:44:35.409566 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-edpm-deployment-openstack-edpm-ipam-xgx5g_060a3d68-c5b3-4788-8c63-ce0b6d67acc5/ovn-edpm-deployment-openstack-edpm-ipam/0.log" Nov 22 11:44:35 crc kubenswrapper[4926]: I1122 11:44:35.535806 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-northd-0_d90df493-f9a0-4774-bd2e-6b96bbfebf31/openstack-network-exporter/0.log" Nov 22 11:44:35 crc kubenswrapper[4926]: I1122 11:44:35.571986 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-northd-0_d90df493-f9a0-4774-bd2e-6b96bbfebf31/ovn-northd/0.log" Nov 22 11:44:35 crc kubenswrapper[4926]: I1122 11:44:35.695008 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-0_50a6898f-08ef-48de-bcc5-35b49915cff6/openstack-network-exporter/0.log" Nov 22 11:44:35 crc kubenswrapper[4926]: I1122 11:44:35.755708 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-0_50a6898f-08ef-48de-bcc5-35b49915cff6/ovsdbserver-nb/0.log" Nov 22 11:44:35 crc kubenswrapper[4926]: I1122 11:44:35.836717 4926 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack_ovsdbserver-sb-0_49d1aa9f-06e0-48b0-b0a7-ab459f6ed4d0/openstack-network-exporter/0.log" Nov 22 11:44:35 crc kubenswrapper[4926]: I1122 11:44:35.892860 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-0_49d1aa9f-06e0-48b0-b0a7-ab459f6ed4d0/ovsdbserver-sb/0.log" Nov 22 11:44:36 crc kubenswrapper[4926]: I1122 11:44:36.089244 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_placement-65b67ff7d-d2fkp_c5fcfb96-741e-467c-971f-762618aa54d5/placement-api/0.log" Nov 22 11:44:36 crc kubenswrapper[4926]: I1122 11:44:36.155628 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_placement-65b67ff7d-d2fkp_c5fcfb96-741e-467c-971f-762618aa54d5/placement-log/0.log" Nov 22 11:44:36 crc kubenswrapper[4926]: I1122 11:44:36.180775 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-cell1-server-0_5bd13931-4b28-4235-a779-aea2a515351e/setup-container/0.log" Nov 22 11:44:36 crc kubenswrapper[4926]: I1122 11:44:36.470295 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-cell1-server-0_5bd13931-4b28-4235-a779-aea2a515351e/setup-container/0.log" Nov 22 11:44:36 crc kubenswrapper[4926]: I1122 11:44:36.496391 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-0_9bcfa04c-3c9e-47a5-946e-d7c42d3cefda/setup-container/0.log" Nov 22 11:44:36 crc kubenswrapper[4926]: I1122 11:44:36.517843 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-cell1-server-0_5bd13931-4b28-4235-a779-aea2a515351e/rabbitmq/0.log" Nov 22 11:44:36 crc kubenswrapper[4926]: I1122 11:44:36.724065 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_reboot-os-edpm-deployment-openstack-edpm-ipam-hvcbq_07bbb761-300d-4592-9c67-27e85a79e770/reboot-os-edpm-deployment-openstack-edpm-ipam/0.log" Nov 22 11:44:36 crc kubenswrapper[4926]: I1122 11:44:36.735175 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-0_9bcfa04c-3c9e-47a5-946e-d7c42d3cefda/setup-container/0.log" Nov 22 11:44:36 crc kubenswrapper[4926]: I1122 11:44:36.769949 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-0_9bcfa04c-3c9e-47a5-946e-d7c42d3cefda/rabbitmq/0.log" Nov 22 11:44:36 crc kubenswrapper[4926]: I1122 11:44:36.951394 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_redhat-edpm-deployment-openstack-edpm-ipam-k2x5v_5241dfa6-bfdd-495c-8853-135648e0c112/redhat-edpm-deployment-openstack-edpm-ipam/0.log" Nov 22 11:44:37 crc kubenswrapper[4926]: I1122 11:44:37.060314 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_repo-setup-edpm-deployment-openstack-edpm-ipam-8gt7p_9c3831ca-e426-4b08-ad83-050cbedbd547/repo-setup-edpm-deployment-openstack-edpm-ipam/0.log" Nov 22 11:44:37 crc kubenswrapper[4926]: I1122 11:44:37.224880 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_run-os-edpm-deployment-openstack-edpm-ipam-td6g4_ca7c4fa4-7055-4d7a-9147-ae64dd195ae1/run-os-edpm-deployment-openstack-edpm-ipam/0.log" Nov 22 11:44:37 crc kubenswrapper[4926]: I1122 11:44:37.233299 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ssh-known-hosts-edpm-deployment-fvg2t_474538df-0433-4fb9-b2c2-ed291078d237/ssh-known-hosts-edpm-deployment/0.log" Nov 22 11:44:37 crc kubenswrapper[4926]: I1122 11:44:37.477472 4926 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack_swift-proxy-74969bfb89-zx2cm_033cb6e2-4f4b-46e3-a28f-61f904e65d4b/proxy-server/0.log" Nov 22 11:44:37 crc kubenswrapper[4926]: I1122 11:44:37.554983 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-proxy-74969bfb89-zx2cm_033cb6e2-4f4b-46e3-a28f-61f904e65d4b/proxy-httpd/0.log" Nov 22 11:44:37 crc kubenswrapper[4926]: I1122 11:44:37.569445 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-ring-rebalance-bn9s6_06d59088-e96c-45eb-aba8-00382ceaa48a/swift-ring-rebalance/0.log" Nov 22 11:44:37 crc kubenswrapper[4926]: I1122 11:44:37.662232 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_2d0b3ca4-531c-4c3e-9af7-f5d2b65c7251/account-auditor/0.log" Nov 22 11:44:37 crc kubenswrapper[4926]: I1122 11:44:37.927723 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_2d0b3ca4-531c-4c3e-9af7-f5d2b65c7251/account-reaper/0.log" Nov 22 11:44:37 crc kubenswrapper[4926]: I1122 11:44:37.944793 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_2d0b3ca4-531c-4c3e-9af7-f5d2b65c7251/account-replicator/0.log" Nov 22 11:44:38 crc kubenswrapper[4926]: I1122 11:44:38.028451 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_2d0b3ca4-531c-4c3e-9af7-f5d2b65c7251/container-auditor/0.log" Nov 22 11:44:38 crc kubenswrapper[4926]: I1122 11:44:38.061974 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_2d0b3ca4-531c-4c3e-9af7-f5d2b65c7251/account-server/0.log" Nov 22 11:44:38 crc kubenswrapper[4926]: I1122 11:44:38.160734 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_2d0b3ca4-531c-4c3e-9af7-f5d2b65c7251/container-replicator/0.log" Nov 22 11:44:38 crc kubenswrapper[4926]: I1122 11:44:38.177451 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_2d0b3ca4-531c-4c3e-9af7-f5d2b65c7251/container-server/0.log" Nov 22 11:44:38 crc kubenswrapper[4926]: I1122 11:44:38.209544 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_2d0b3ca4-531c-4c3e-9af7-f5d2b65c7251/container-updater/0.log" Nov 22 11:44:38 crc kubenswrapper[4926]: I1122 11:44:38.336656 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_2d0b3ca4-531c-4c3e-9af7-f5d2b65c7251/object-auditor/0.log" Nov 22 11:44:38 crc kubenswrapper[4926]: I1122 11:44:38.341773 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_2d0b3ca4-531c-4c3e-9af7-f5d2b65c7251/object-expirer/0.log" Nov 22 11:44:38 crc kubenswrapper[4926]: I1122 11:44:38.429973 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_2d0b3ca4-531c-4c3e-9af7-f5d2b65c7251/object-replicator/0.log" Nov 22 11:44:38 crc kubenswrapper[4926]: I1122 11:44:38.443096 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_2d0b3ca4-531c-4c3e-9af7-f5d2b65c7251/object-server/0.log" Nov 22 11:44:38 crc kubenswrapper[4926]: I1122 11:44:38.524999 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_2d0b3ca4-531c-4c3e-9af7-f5d2b65c7251/object-updater/0.log" Nov 22 11:44:38 crc kubenswrapper[4926]: I1122 11:44:38.538834 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_2d0b3ca4-531c-4c3e-9af7-f5d2b65c7251/rsync/0.log" Nov 22 
11:44:38 crc kubenswrapper[4926]: I1122 11:44:38.641136 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_2d0b3ca4-531c-4c3e-9af7-f5d2b65c7251/swift-recon-cron/0.log" Nov 22 11:44:38 crc kubenswrapper[4926]: I1122 11:44:38.756155 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_telemetry-edpm-deployment-openstack-edpm-ipam-sfz4n_b1c88ed0-29ad-4a8c-8708-0b2a1d5853a4/telemetry-edpm-deployment-openstack-edpm-ipam/0.log" Nov 22 11:44:38 crc kubenswrapper[4926]: I1122 11:44:38.871789 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_tempest-tests-tempest_588c20c1-2673-4c55-9dc4-1e20448b5adb/tempest-tests-tempest-tests-runner/0.log" Nov 22 11:44:38 crc kubenswrapper[4926]: I1122 11:44:38.904351 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_test-operator-logs-pod-tempest-tempest-tests-tempest_2afc1e0b-105d-4e75-b966-4a8bdff5f07f/test-operator-logs-container/0.log" Nov 22 11:44:39 crc kubenswrapper[4926]: I1122 11:44:39.084355 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_validate-network-edpm-deployment-openstack-edpm-ipam-bj2hp_ea024223-c658-4c22-9318-8eb14052b38f/validate-network-edpm-deployment-openstack-edpm-ipam/0.log" Nov 22 11:44:46 crc kubenswrapper[4926]: I1122 11:44:46.584604 4926 scope.go:117] "RemoveContainer" containerID="23214392718d6cb9dbc33ab7d4275deb5093396ce41b71fae35e5d84eda8c403" Nov 22 11:44:46 crc kubenswrapper[4926]: E1122 11:44:46.585206 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xr9nd_openshift-machine-config-operator(d4977b14-85c3-4141-9b15-1768f09e8d27)\"" pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" podUID="d4977b14-85c3-4141-9b15-1768f09e8d27" Nov 22 11:44:48 crc kubenswrapper[4926]: I1122 11:44:48.712766 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_memcached-0_6f000ebf-57ae-4f00-9aaf-7583a9ec4abb/memcached/0.log" Nov 22 11:45:00 crc kubenswrapper[4926]: I1122 11:45:00.159778 4926 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29396865-gqs52"] Nov 22 11:45:00 crc kubenswrapper[4926]: E1122 11:45:00.160736 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="85d44158-a202-4921-a243-c79e9219275e" containerName="container-00" Nov 22 11:45:00 crc kubenswrapper[4926]: I1122 11:45:00.160751 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="85d44158-a202-4921-a243-c79e9219275e" containerName="container-00" Nov 22 11:45:00 crc kubenswrapper[4926]: E1122 11:45:00.160763 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4549b3af-8a8d-4faa-8aaa-3b23866fc11b" containerName="extract-content" Nov 22 11:45:00 crc kubenswrapper[4926]: I1122 11:45:00.160769 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="4549b3af-8a8d-4faa-8aaa-3b23866fc11b" containerName="extract-content" Nov 22 11:45:00 crc kubenswrapper[4926]: E1122 11:45:00.160784 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4549b3af-8a8d-4faa-8aaa-3b23866fc11b" containerName="extract-utilities" Nov 22 11:45:00 crc kubenswrapper[4926]: I1122 11:45:00.160791 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="4549b3af-8a8d-4faa-8aaa-3b23866fc11b" containerName="extract-utilities" Nov 22 11:45:00 crc 
kubenswrapper[4926]: E1122 11:45:00.160803 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4549b3af-8a8d-4faa-8aaa-3b23866fc11b" containerName="registry-server" Nov 22 11:45:00 crc kubenswrapper[4926]: I1122 11:45:00.160809 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="4549b3af-8a8d-4faa-8aaa-3b23866fc11b" containerName="registry-server" Nov 22 11:45:00 crc kubenswrapper[4926]: I1122 11:45:00.161009 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="85d44158-a202-4921-a243-c79e9219275e" containerName="container-00" Nov 22 11:45:00 crc kubenswrapper[4926]: I1122 11:45:00.161032 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="4549b3af-8a8d-4faa-8aaa-3b23866fc11b" containerName="registry-server" Nov 22 11:45:00 crc kubenswrapper[4926]: I1122 11:45:00.161632 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29396865-gqs52" Nov 22 11:45:00 crc kubenswrapper[4926]: I1122 11:45:00.165078 4926 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 22 11:45:00 crc kubenswrapper[4926]: I1122 11:45:00.165752 4926 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 22 11:45:00 crc kubenswrapper[4926]: I1122 11:45:00.211801 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29396865-gqs52"] Nov 22 11:45:00 crc kubenswrapper[4926]: I1122 11:45:00.222985 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/18b96fb2-d21b-4f30-b1f6-b64204879976-secret-volume\") pod \"collect-profiles-29396865-gqs52\" (UID: \"18b96fb2-d21b-4f30-b1f6-b64204879976\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29396865-gqs52" Nov 22 11:45:00 crc kubenswrapper[4926]: I1122 11:45:00.223189 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/18b96fb2-d21b-4f30-b1f6-b64204879976-config-volume\") pod \"collect-profiles-29396865-gqs52\" (UID: \"18b96fb2-d21b-4f30-b1f6-b64204879976\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29396865-gqs52" Nov 22 11:45:00 crc kubenswrapper[4926]: I1122 11:45:00.223299 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-b2v56\" (UniqueName: \"kubernetes.io/projected/18b96fb2-d21b-4f30-b1f6-b64204879976-kube-api-access-b2v56\") pod \"collect-profiles-29396865-gqs52\" (UID: \"18b96fb2-d21b-4f30-b1f6-b64204879976\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29396865-gqs52" Nov 22 11:45:00 crc kubenswrapper[4926]: I1122 11:45:00.325274 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/18b96fb2-d21b-4f30-b1f6-b64204879976-secret-volume\") pod \"collect-profiles-29396865-gqs52\" (UID: \"18b96fb2-d21b-4f30-b1f6-b64204879976\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29396865-gqs52" Nov 22 11:45:00 crc kubenswrapper[4926]: I1122 11:45:00.325763 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: 
\"kubernetes.io/configmap/18b96fb2-d21b-4f30-b1f6-b64204879976-config-volume\") pod \"collect-profiles-29396865-gqs52\" (UID: \"18b96fb2-d21b-4f30-b1f6-b64204879976\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29396865-gqs52" Nov 22 11:45:00 crc kubenswrapper[4926]: I1122 11:45:00.325843 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-b2v56\" (UniqueName: \"kubernetes.io/projected/18b96fb2-d21b-4f30-b1f6-b64204879976-kube-api-access-b2v56\") pod \"collect-profiles-29396865-gqs52\" (UID: \"18b96fb2-d21b-4f30-b1f6-b64204879976\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29396865-gqs52" Nov 22 11:45:00 crc kubenswrapper[4926]: I1122 11:45:00.327661 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/18b96fb2-d21b-4f30-b1f6-b64204879976-config-volume\") pod \"collect-profiles-29396865-gqs52\" (UID: \"18b96fb2-d21b-4f30-b1f6-b64204879976\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29396865-gqs52" Nov 22 11:45:00 crc kubenswrapper[4926]: I1122 11:45:00.330185 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/18b96fb2-d21b-4f30-b1f6-b64204879976-secret-volume\") pod \"collect-profiles-29396865-gqs52\" (UID: \"18b96fb2-d21b-4f30-b1f6-b64204879976\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29396865-gqs52" Nov 22 11:45:00 crc kubenswrapper[4926]: I1122 11:45:00.341776 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-b2v56\" (UniqueName: \"kubernetes.io/projected/18b96fb2-d21b-4f30-b1f6-b64204879976-kube-api-access-b2v56\") pod \"collect-profiles-29396865-gqs52\" (UID: \"18b96fb2-d21b-4f30-b1f6-b64204879976\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29396865-gqs52" Nov 22 11:45:00 crc kubenswrapper[4926]: I1122 11:45:00.523917 4926 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29396865-gqs52" Nov 22 11:45:00 crc kubenswrapper[4926]: I1122 11:45:00.588599 4926 scope.go:117] "RemoveContainer" containerID="23214392718d6cb9dbc33ab7d4275deb5093396ce41b71fae35e5d84eda8c403" Nov 22 11:45:00 crc kubenswrapper[4926]: E1122 11:45:00.589090 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xr9nd_openshift-machine-config-operator(d4977b14-85c3-4141-9b15-1768f09e8d27)\"" pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" podUID="d4977b14-85c3-4141-9b15-1768f09e8d27" Nov 22 11:45:00 crc kubenswrapper[4926]: I1122 11:45:00.988030 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29396865-gqs52"] Nov 22 11:45:01 crc kubenswrapper[4926]: I1122 11:45:01.909818 4926 generic.go:334] "Generic (PLEG): container finished" podID="18b96fb2-d21b-4f30-b1f6-b64204879976" containerID="2d8e573ec487d02a248c0ffcb93939133882d810fb69e1de802e3bc07021c028" exitCode=0 Nov 22 11:45:01 crc kubenswrapper[4926]: I1122 11:45:01.909911 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29396865-gqs52" event={"ID":"18b96fb2-d21b-4f30-b1f6-b64204879976","Type":"ContainerDied","Data":"2d8e573ec487d02a248c0ffcb93939133882d810fb69e1de802e3bc07021c028"} Nov 22 11:45:01 crc kubenswrapper[4926]: I1122 11:45:01.910294 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29396865-gqs52" event={"ID":"18b96fb2-d21b-4f30-b1f6-b64204879976","Type":"ContainerStarted","Data":"5101bd48183bf86e6c031ff727d41a0ae0cfa49a88fc6829629fec08a2064c69"} Nov 22 11:45:03 crc kubenswrapper[4926]: I1122 11:45:03.244987 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_236e4f7cddf341c99d6684fdc49c2b84b27b70fb0d54817b1a8864f51fld44v_0379bb7b-1539-4a2f-888d-fc7bd9828a33/util/0.log" Nov 22 11:45:03 crc kubenswrapper[4926]: I1122 11:45:03.289057 4926 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29396865-gqs52" Nov 22 11:45:03 crc kubenswrapper[4926]: I1122 11:45:03.381976 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-b2v56\" (UniqueName: \"kubernetes.io/projected/18b96fb2-d21b-4f30-b1f6-b64204879976-kube-api-access-b2v56\") pod \"18b96fb2-d21b-4f30-b1f6-b64204879976\" (UID: \"18b96fb2-d21b-4f30-b1f6-b64204879976\") " Nov 22 11:45:03 crc kubenswrapper[4926]: I1122 11:45:03.382070 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/18b96fb2-d21b-4f30-b1f6-b64204879976-config-volume\") pod \"18b96fb2-d21b-4f30-b1f6-b64204879976\" (UID: \"18b96fb2-d21b-4f30-b1f6-b64204879976\") " Nov 22 11:45:03 crc kubenswrapper[4926]: I1122 11:45:03.382143 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/18b96fb2-d21b-4f30-b1f6-b64204879976-secret-volume\") pod \"18b96fb2-d21b-4f30-b1f6-b64204879976\" (UID: \"18b96fb2-d21b-4f30-b1f6-b64204879976\") " Nov 22 11:45:03 crc kubenswrapper[4926]: I1122 11:45:03.382793 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/18b96fb2-d21b-4f30-b1f6-b64204879976-config-volume" (OuterVolumeSpecName: "config-volume") pod "18b96fb2-d21b-4f30-b1f6-b64204879976" (UID: "18b96fb2-d21b-4f30-b1f6-b64204879976"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 11:45:03 crc kubenswrapper[4926]: I1122 11:45:03.387926 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/18b96fb2-d21b-4f30-b1f6-b64204879976-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "18b96fb2-d21b-4f30-b1f6-b64204879976" (UID: "18b96fb2-d21b-4f30-b1f6-b64204879976"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 11:45:03 crc kubenswrapper[4926]: I1122 11:45:03.393103 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/18b96fb2-d21b-4f30-b1f6-b64204879976-kube-api-access-b2v56" (OuterVolumeSpecName: "kube-api-access-b2v56") pod "18b96fb2-d21b-4f30-b1f6-b64204879976" (UID: "18b96fb2-d21b-4f30-b1f6-b64204879976"). InnerVolumeSpecName "kube-api-access-b2v56". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 11:45:03 crc kubenswrapper[4926]: I1122 11:45:03.429869 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_236e4f7cddf341c99d6684fdc49c2b84b27b70fb0d54817b1a8864f51fld44v_0379bb7b-1539-4a2f-888d-fc7bd9828a33/util/0.log" Nov 22 11:45:03 crc kubenswrapper[4926]: I1122 11:45:03.456659 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_236e4f7cddf341c99d6684fdc49c2b84b27b70fb0d54817b1a8864f51fld44v_0379bb7b-1539-4a2f-888d-fc7bd9828a33/pull/0.log" Nov 22 11:45:03 crc kubenswrapper[4926]: I1122 11:45:03.474571 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_236e4f7cddf341c99d6684fdc49c2b84b27b70fb0d54817b1a8864f51fld44v_0379bb7b-1539-4a2f-888d-fc7bd9828a33/pull/0.log" Nov 22 11:45:03 crc kubenswrapper[4926]: I1122 11:45:03.484929 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-b2v56\" (UniqueName: \"kubernetes.io/projected/18b96fb2-d21b-4f30-b1f6-b64204879976-kube-api-access-b2v56\") on node \"crc\" DevicePath \"\"" Nov 22 11:45:03 crc kubenswrapper[4926]: I1122 11:45:03.484962 4926 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/18b96fb2-d21b-4f30-b1f6-b64204879976-config-volume\") on node \"crc\" DevicePath \"\"" Nov 22 11:45:03 crc kubenswrapper[4926]: I1122 11:45:03.484975 4926 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/18b96fb2-d21b-4f30-b1f6-b64204879976-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 22 11:45:03 crc kubenswrapper[4926]: I1122 11:45:03.588780 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_236e4f7cddf341c99d6684fdc49c2b84b27b70fb0d54817b1a8864f51fld44v_0379bb7b-1539-4a2f-888d-fc7bd9828a33/util/0.log" Nov 22 11:45:03 crc kubenswrapper[4926]: I1122 11:45:03.617552 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_236e4f7cddf341c99d6684fdc49c2b84b27b70fb0d54817b1a8864f51fld44v_0379bb7b-1539-4a2f-888d-fc7bd9828a33/pull/0.log" Nov 22 11:45:03 crc kubenswrapper[4926]: I1122 11:45:03.643781 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_236e4f7cddf341c99d6684fdc49c2b84b27b70fb0d54817b1a8864f51fld44v_0379bb7b-1539-4a2f-888d-fc7bd9828a33/extract/0.log" Nov 22 11:45:03 crc kubenswrapper[4926]: I1122 11:45:03.778280 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_barbican-operator-controller-manager-697c78f669-dfq9w_f8406cda-67f4-425a-83f1-ab90cf4ebf0c/kube-rbac-proxy/0.log" Nov 22 11:45:03 crc kubenswrapper[4926]: I1122 11:45:03.872526 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_barbican-operator-controller-manager-697c78f669-dfq9w_f8406cda-67f4-425a-83f1-ab90cf4ebf0c/manager/0.log" Nov 22 11:45:03 crc kubenswrapper[4926]: I1122 11:45:03.911397 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_cinder-operator-controller-manager-6498cbf48f-cttxc_644aaf3f-48c2-4789-9775-18ed3ae24fd7/kube-rbac-proxy/0.log" Nov 22 11:45:03 crc kubenswrapper[4926]: I1122 11:45:03.933375 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29396865-gqs52" event={"ID":"18b96fb2-d21b-4f30-b1f6-b64204879976","Type":"ContainerDied","Data":"5101bd48183bf86e6c031ff727d41a0ae0cfa49a88fc6829629fec08a2064c69"} Nov 22 
11:45:03 crc kubenswrapper[4926]: I1122 11:45:03.933411 4926 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="5101bd48183bf86e6c031ff727d41a0ae0cfa49a88fc6829629fec08a2064c69" Nov 22 11:45:03 crc kubenswrapper[4926]: I1122 11:45:03.933464 4926 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29396865-gqs52" Nov 22 11:45:03 crc kubenswrapper[4926]: I1122 11:45:03.985991 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_cinder-operator-controller-manager-6498cbf48f-cttxc_644aaf3f-48c2-4789-9775-18ed3ae24fd7/manager/0.log" Nov 22 11:45:04 crc kubenswrapper[4926]: I1122 11:45:04.064261 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_designate-operator-controller-manager-767ccfd65f-8p4kn_02d4d3c4-4951-4f41-8605-239ac95dae92/kube-rbac-proxy/0.log" Nov 22 11:45:04 crc kubenswrapper[4926]: I1122 11:45:04.105055 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_designate-operator-controller-manager-767ccfd65f-8p4kn_02d4d3c4-4951-4f41-8605-239ac95dae92/manager/0.log" Nov 22 11:45:04 crc kubenswrapper[4926]: I1122 11:45:04.210843 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_glance-operator-controller-manager-7969689c84-qshq6_3ebbbdf8-da82-4f02-a8f5-509de3b56721/kube-rbac-proxy/0.log" Nov 22 11:45:04 crc kubenswrapper[4926]: I1122 11:45:04.318077 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_glance-operator-controller-manager-7969689c84-qshq6_3ebbbdf8-da82-4f02-a8f5-509de3b56721/manager/0.log" Nov 22 11:45:04 crc kubenswrapper[4926]: I1122 11:45:04.365909 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29396820-t4glk"] Nov 22 11:45:04 crc kubenswrapper[4926]: I1122 11:45:04.376449 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29396820-t4glk"] Nov 22 11:45:04 crc kubenswrapper[4926]: I1122 11:45:04.380767 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_heat-operator-controller-manager-7869d7c46b-np8cn_bf6721b8-a1f6-4d27-ad5a-c090e2dc8806/kube-rbac-proxy/0.log" Nov 22 11:45:04 crc kubenswrapper[4926]: I1122 11:45:04.421164 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_heat-operator-controller-manager-7869d7c46b-np8cn_bf6721b8-a1f6-4d27-ad5a-c090e2dc8806/manager/0.log" Nov 22 11:45:04 crc kubenswrapper[4926]: I1122 11:45:04.503769 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_horizon-operator-controller-manager-598f69df5d-hls4w_f3502c04-7310-4659-aa47-b91b71ff3b30/kube-rbac-proxy/0.log" Nov 22 11:45:04 crc kubenswrapper[4926]: I1122 11:45:04.568860 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_horizon-operator-controller-manager-598f69df5d-hls4w_f3502c04-7310-4659-aa47-b91b71ff3b30/manager/0.log" Nov 22 11:45:04 crc kubenswrapper[4926]: I1122 11:45:04.597787 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7ecb12c0-9db0-426f-9160-214011fc3f9c" path="/var/lib/kubelet/pods/7ecb12c0-9db0-426f-9160-214011fc3f9c/volumes" Nov 22 11:45:04 crc kubenswrapper[4926]: I1122 11:45:04.690769 4926 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack-operators_infra-operator-controller-manager-7875d8bb94-pr7tn_dc80ed79-7a34-4756-b5ed-0b3cda532910/kube-rbac-proxy/0.log" Nov 22 11:45:04 crc kubenswrapper[4926]: I1122 11:45:04.822190 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_infra-operator-controller-manager-7875d8bb94-pr7tn_dc80ed79-7a34-4756-b5ed-0b3cda532910/manager/0.log" Nov 22 11:45:04 crc kubenswrapper[4926]: I1122 11:45:04.835985 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ironic-operator-controller-manager-99b499f4-7j69z_46528db3-6717-4abb-a779-33290ae0c986/kube-rbac-proxy/0.log" Nov 22 11:45:04 crc kubenswrapper[4926]: I1122 11:45:04.889404 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ironic-operator-controller-manager-99b499f4-7j69z_46528db3-6717-4abb-a779-33290ae0c986/manager/0.log" Nov 22 11:45:04 crc kubenswrapper[4926]: I1122 11:45:04.974237 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_keystone-operator-controller-manager-7454b96578-dvdzj_7e28261c-db91-4143-a418-1114acf60dc0/kube-rbac-proxy/0.log" Nov 22 11:45:05 crc kubenswrapper[4926]: I1122 11:45:05.047727 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_keystone-operator-controller-manager-7454b96578-dvdzj_7e28261c-db91-4143-a418-1114acf60dc0/manager/0.log" Nov 22 11:45:05 crc kubenswrapper[4926]: I1122 11:45:05.138372 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_manila-operator-controller-manager-58f887965d-r2ctj_0996e99c-8565-426e-afa0-8a52ff2bee16/manager/0.log" Nov 22 11:45:05 crc kubenswrapper[4926]: I1122 11:45:05.166697 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_manila-operator-controller-manager-58f887965d-r2ctj_0996e99c-8565-426e-afa0-8a52ff2bee16/kube-rbac-proxy/0.log" Nov 22 11:45:05 crc kubenswrapper[4926]: I1122 11:45:05.252038 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_mariadb-operator-controller-manager-5f449d8fbc-bfqxw_72b66cb9-cb2a-4977-a3f1-3fe22508641e/kube-rbac-proxy/0.log" Nov 22 11:45:05 crc kubenswrapper[4926]: I1122 11:45:05.321452 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_mariadb-operator-controller-manager-5f449d8fbc-bfqxw_72b66cb9-cb2a-4977-a3f1-3fe22508641e/manager/0.log" Nov 22 11:45:05 crc kubenswrapper[4926]: I1122 11:45:05.434217 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_neutron-operator-controller-manager-669dc6ff5f-crkkv_0126a31b-68bb-46a7-8f3a-f34ad5d74e6d/kube-rbac-proxy/0.log" Nov 22 11:45:05 crc kubenswrapper[4926]: I1122 11:45:05.457546 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_neutron-operator-controller-manager-669dc6ff5f-crkkv_0126a31b-68bb-46a7-8f3a-f34ad5d74e6d/manager/0.log" Nov 22 11:45:05 crc kubenswrapper[4926]: I1122 11:45:05.572743 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_nova-operator-controller-manager-cfbb9c588-zjkmb_8b039ede-62fc-47ed-83ed-672e756887a1/kube-rbac-proxy/0.log" Nov 22 11:45:05 crc kubenswrapper[4926]: I1122 11:45:05.688839 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_octavia-operator-controller-manager-54cfbf4c7d-c5z8p_71c1201e-62bb-4d32-945b-80cda1ff41ac/kube-rbac-proxy/0.log" Nov 22 11:45:05 crc kubenswrapper[4926]: I1122 11:45:05.703091 4926 log.go:25] 
"Finished parsing log file" path="/var/log/pods/openstack-operators_nova-operator-controller-manager-cfbb9c588-zjkmb_8b039ede-62fc-47ed-83ed-672e756887a1/manager/0.log" Nov 22 11:45:05 crc kubenswrapper[4926]: I1122 11:45:05.805634 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_octavia-operator-controller-manager-54cfbf4c7d-c5z8p_71c1201e-62bb-4d32-945b-80cda1ff41ac/manager/0.log" Nov 22 11:45:05 crc kubenswrapper[4926]: I1122 11:45:05.922913 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-baremetal-operator-controller-manager-7cfbb8b596qtsss_4c6f9a58-d6f5-426f-bb8d-e019401a015a/kube-rbac-proxy/0.log" Nov 22 11:45:05 crc kubenswrapper[4926]: I1122 11:45:05.970562 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-baremetal-operator-controller-manager-7cfbb8b596qtsss_4c6f9a58-d6f5-426f-bb8d-e019401a015a/manager/0.log" Nov 22 11:45:06 crc kubenswrapper[4926]: I1122 11:45:06.186189 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-operator-76ffdb7f4-g9zx8_391d1daa-3379-45e6-be55-fb2c3e1d304a/kube-rbac-proxy/0.log" Nov 22 11:45:06 crc kubenswrapper[4926]: I1122 11:45:06.210040 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-manager-64844fbb8-hngj4_d292d5fa-12ea-40d0-a6df-1f6e9f5c8059/kube-rbac-proxy/0.log" Nov 22 11:45:06 crc kubenswrapper[4926]: I1122 11:45:06.503008 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-index-56rwr_cc73291c-a3b1-4641-95a2-454130fe25f5/registry-server/0.log" Nov 22 11:45:06 crc kubenswrapper[4926]: I1122 11:45:06.549844 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-operator-76ffdb7f4-g9zx8_391d1daa-3379-45e6-be55-fb2c3e1d304a/operator/0.log" Nov 22 11:45:06 crc kubenswrapper[4926]: I1122 11:45:06.710505 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ovn-operator-controller-manager-587df66445-2hwd8_355d4b1d-9137-4cf5-aac8-e373d1b7d696/kube-rbac-proxy/0.log" Nov 22 11:45:06 crc kubenswrapper[4926]: I1122 11:45:06.805557 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ovn-operator-controller-manager-587df66445-2hwd8_355d4b1d-9137-4cf5-aac8-e373d1b7d696/manager/0.log" Nov 22 11:45:07 crc kubenswrapper[4926]: I1122 11:45:07.020801 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_placement-operator-controller-manager-5b797b8dff-pgzg9_ec00fa84-7dd0-46d6-b9f2-4a7b687b347b/kube-rbac-proxy/0.log" Nov 22 11:45:07 crc kubenswrapper[4926]: I1122 11:45:07.025061 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_placement-operator-controller-manager-5b797b8dff-pgzg9_ec00fa84-7dd0-46d6-b9f2-4a7b687b347b/manager/0.log" Nov 22 11:45:07 crc kubenswrapper[4926]: I1122 11:45:07.124333 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_rabbitmq-cluster-operator-manager-5f97d8c699-jdqsq_3ab27f1b-e328-46d1-b9e5-b29e2caedef6/operator/0.log" Nov 22 11:45:07 crc kubenswrapper[4926]: I1122 11:45:07.221905 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-manager-64844fbb8-hngj4_d292d5fa-12ea-40d0-a6df-1f6e9f5c8059/manager/0.log" Nov 22 11:45:07 crc kubenswrapper[4926]: I1122 
11:45:07.280929 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_swift-operator-controller-manager-d656998f4-j7xg8_e30ebbd3-daab-4ee4-acea-631c15b5045b/kube-rbac-proxy/0.log" Nov 22 11:45:07 crc kubenswrapper[4926]: I1122 11:45:07.344228 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_swift-operator-controller-manager-d656998f4-j7xg8_e30ebbd3-daab-4ee4-acea-631c15b5045b/manager/0.log" Nov 22 11:45:07 crc kubenswrapper[4926]: I1122 11:45:07.404229 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_telemetry-operator-controller-manager-6d4bf84b58-95jv5_6c866ac0-e106-4a90-a223-435b244634b5/kube-rbac-proxy/0.log" Nov 22 11:45:07 crc kubenswrapper[4926]: I1122 11:45:07.480495 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_telemetry-operator-controller-manager-6d4bf84b58-95jv5_6c866ac0-e106-4a90-a223-435b244634b5/manager/0.log" Nov 22 11:45:07 crc kubenswrapper[4926]: I1122 11:45:07.522477 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_test-operator-controller-manager-6f44bf845f-7vhg5_dab1442d-6ad4-4d03-b520-a12d7a4d6c9d/manager/0.log" Nov 22 11:45:07 crc kubenswrapper[4926]: I1122 11:45:07.567078 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_test-operator-controller-manager-6f44bf845f-7vhg5_dab1442d-6ad4-4d03-b520-a12d7a4d6c9d/kube-rbac-proxy/0.log" Nov 22 11:45:07 crc kubenswrapper[4926]: I1122 11:45:07.645484 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_watcher-operator-controller-manager-85494d54fc-czf4h_3947549a-e067-4135-ba36-1e2663db15c0/kube-rbac-proxy/0.log" Nov 22 11:45:07 crc kubenswrapper[4926]: I1122 11:45:07.659016 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_watcher-operator-controller-manager-85494d54fc-czf4h_3947549a-e067-4135-ba36-1e2663db15c0/manager/0.log" Nov 22 11:45:11 crc kubenswrapper[4926]: I1122 11:45:11.582870 4926 scope.go:117] "RemoveContainer" containerID="23214392718d6cb9dbc33ab7d4275deb5093396ce41b71fae35e5d84eda8c403" Nov 22 11:45:11 crc kubenswrapper[4926]: E1122 11:45:11.583953 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xr9nd_openshift-machine-config-operator(d4977b14-85c3-4141-9b15-1768f09e8d27)\"" pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" podUID="d4977b14-85c3-4141-9b15-1768f09e8d27" Nov 22 11:45:23 crc kubenswrapper[4926]: I1122 11:45:23.177389 4926 scope.go:117] "RemoveContainer" containerID="de95b4e1ddd886888bca8b19ea58c41b74c64617926a49c331fe90294b3078c9" Nov 22 11:45:23 crc kubenswrapper[4926]: I1122 11:45:23.315839 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_control-plane-machine-set-operator-78cbb6b69f-rkbd7_0fd98b4c-0217-4784-8bbd-b0ec0680a611/control-plane-machine-set-operator/0.log" Nov 22 11:45:23 crc kubenswrapper[4926]: I1122 11:45:23.484499 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_machine-api-operator-5694c8668f-jkqsx_e0882887-a6d9-4aac-a7d7-c14b934298e2/kube-rbac-proxy/0.log" Nov 22 11:45:23 crc kubenswrapper[4926]: I1122 11:45:23.487377 4926 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-machine-api_machine-api-operator-5694c8668f-jkqsx_e0882887-a6d9-4aac-a7d7-c14b934298e2/machine-api-operator/0.log" Nov 22 11:45:26 crc kubenswrapper[4926]: I1122 11:45:26.582573 4926 scope.go:117] "RemoveContainer" containerID="23214392718d6cb9dbc33ab7d4275deb5093396ce41b71fae35e5d84eda8c403" Nov 22 11:45:26 crc kubenswrapper[4926]: E1122 11:45:26.583466 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xr9nd_openshift-machine-config-operator(d4977b14-85c3-4141-9b15-1768f09e8d27)\"" pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" podUID="d4977b14-85c3-4141-9b15-1768f09e8d27" Nov 22 11:45:35 crc kubenswrapper[4926]: I1122 11:45:35.767576 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-5b446d88c5-xpqf5_473a1f27-e3c6-4c74-9daf-da6ae42cc754/cert-manager-controller/0.log" Nov 22 11:45:35 crc kubenswrapper[4926]: I1122 11:45:35.940974 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-cainjector-7f985d654d-zzrcx_46c8cced-eb2e-409c-9923-f28c5924e5b1/cert-manager-cainjector/0.log" Nov 22 11:45:35 crc kubenswrapper[4926]: I1122 11:45:35.953542 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-webhook-5655c58dd6-w4h2r_ffec6625-ab3c-4e67-af68-afdbe4210730/cert-manager-webhook/0.log" Nov 22 11:45:40 crc kubenswrapper[4926]: I1122 11:45:40.594497 4926 scope.go:117] "RemoveContainer" containerID="23214392718d6cb9dbc33ab7d4275deb5093396ce41b71fae35e5d84eda8c403" Nov 22 11:45:40 crc kubenswrapper[4926]: E1122 11:45:40.595316 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xr9nd_openshift-machine-config-operator(d4977b14-85c3-4141-9b15-1768f09e8d27)\"" pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" podUID="d4977b14-85c3-4141-9b15-1768f09e8d27" Nov 22 11:45:49 crc kubenswrapper[4926]: I1122 11:45:49.015260 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-console-plugin-5874bd7bc5-slspm_1d24ff8c-3a27-452a-a473-90e139c30740/nmstate-console-plugin/0.log" Nov 22 11:45:49 crc kubenswrapper[4926]: I1122 11:45:49.142558 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-handler-lt5kb_cae34914-f89d-4a66-bb66-901024424e79/nmstate-handler/0.log" Nov 22 11:45:49 crc kubenswrapper[4926]: I1122 11:45:49.196446 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-metrics-5dcf9c57c5-vfdhw_1b73e906-db9e-454d-8316-2266a666d683/kube-rbac-proxy/0.log" Nov 22 11:45:49 crc kubenswrapper[4926]: I1122 11:45:49.247165 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-metrics-5dcf9c57c5-vfdhw_1b73e906-db9e-454d-8316-2266a666d683/nmstate-metrics/0.log" Nov 22 11:45:49 crc kubenswrapper[4926]: I1122 11:45:49.401195 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-operator-557fdffb88-2m244_251bb93a-68e8-4e17-98ac-0dc9c7f31ace/nmstate-operator/0.log" Nov 22 11:45:49 crc kubenswrapper[4926]: I1122 11:45:49.463654 4926 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-nmstate_nmstate-webhook-6b89b748d8-zttzw_c24318e9-ff38-4221-8931-046cb1c39368/nmstate-webhook/0.log" Nov 22 11:45:53 crc kubenswrapper[4926]: I1122 11:45:53.582834 4926 scope.go:117] "RemoveContainer" containerID="23214392718d6cb9dbc33ab7d4275deb5093396ce41b71fae35e5d84eda8c403" Nov 22 11:45:53 crc kubenswrapper[4926]: E1122 11:45:53.584065 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xr9nd_openshift-machine-config-operator(d4977b14-85c3-4141-9b15-1768f09e8d27)\"" pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" podUID="d4977b14-85c3-4141-9b15-1768f09e8d27" Nov 22 11:46:04 crc kubenswrapper[4926]: I1122 11:46:04.582201 4926 scope.go:117] "RemoveContainer" containerID="23214392718d6cb9dbc33ab7d4275deb5093396ce41b71fae35e5d84eda8c403" Nov 22 11:46:04 crc kubenswrapper[4926]: E1122 11:46:04.583028 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xr9nd_openshift-machine-config-operator(d4977b14-85c3-4141-9b15-1768f09e8d27)\"" pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" podUID="d4977b14-85c3-4141-9b15-1768f09e8d27" Nov 22 11:46:04 crc kubenswrapper[4926]: I1122 11:46:04.812549 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_controller-6c7b4b5f48-pv5tc_1d4b05be-2133-4a40-a7e8-7e4b49f4c0bc/kube-rbac-proxy/0.log" Nov 22 11:46:04 crc kubenswrapper[4926]: I1122 11:46:04.904207 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_controller-6c7b4b5f48-pv5tc_1d4b05be-2133-4a40-a7e8-7e4b49f4c0bc/controller/0.log" Nov 22 11:46:05 crc kubenswrapper[4926]: I1122 11:46:05.581058 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-pb2r5_493e392b-c61f-4115-abdf-42a9c2febe81/cp-frr-files/0.log" Nov 22 11:46:05 crc kubenswrapper[4926]: I1122 11:46:05.765804 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-pb2r5_493e392b-c61f-4115-abdf-42a9c2febe81/cp-frr-files/0.log" Nov 22 11:46:05 crc kubenswrapper[4926]: I1122 11:46:05.819511 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-pb2r5_493e392b-c61f-4115-abdf-42a9c2febe81/cp-reloader/0.log" Nov 22 11:46:05 crc kubenswrapper[4926]: I1122 11:46:05.819574 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-pb2r5_493e392b-c61f-4115-abdf-42a9c2febe81/cp-reloader/0.log" Nov 22 11:46:05 crc kubenswrapper[4926]: I1122 11:46:05.859363 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-pb2r5_493e392b-c61f-4115-abdf-42a9c2febe81/cp-metrics/0.log" Nov 22 11:46:06 crc kubenswrapper[4926]: I1122 11:46:06.027205 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-pb2r5_493e392b-c61f-4115-abdf-42a9c2febe81/cp-metrics/0.log" Nov 22 11:46:06 crc kubenswrapper[4926]: I1122 11:46:06.028064 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-pb2r5_493e392b-c61f-4115-abdf-42a9c2febe81/cp-frr-files/0.log" Nov 22 11:46:06 crc kubenswrapper[4926]: I1122 11:46:06.037316 4926 log.go:25] "Finished parsing log file" 
path="/var/log/pods/metallb-system_frr-k8s-pb2r5_493e392b-c61f-4115-abdf-42a9c2febe81/cp-reloader/0.log" Nov 22 11:46:06 crc kubenswrapper[4926]: I1122 11:46:06.077658 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-pb2r5_493e392b-c61f-4115-abdf-42a9c2febe81/cp-metrics/0.log" Nov 22 11:46:06 crc kubenswrapper[4926]: I1122 11:46:06.195996 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-pb2r5_493e392b-c61f-4115-abdf-42a9c2febe81/cp-frr-files/0.log" Nov 22 11:46:06 crc kubenswrapper[4926]: I1122 11:46:06.242534 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-pb2r5_493e392b-c61f-4115-abdf-42a9c2febe81/cp-reloader/0.log" Nov 22 11:46:06 crc kubenswrapper[4926]: I1122 11:46:06.243954 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-pb2r5_493e392b-c61f-4115-abdf-42a9c2febe81/cp-metrics/0.log" Nov 22 11:46:06 crc kubenswrapper[4926]: I1122 11:46:06.249213 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-pb2r5_493e392b-c61f-4115-abdf-42a9c2febe81/controller/0.log" Nov 22 11:46:06 crc kubenswrapper[4926]: I1122 11:46:06.450041 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-pb2r5_493e392b-c61f-4115-abdf-42a9c2febe81/frr-metrics/0.log" Nov 22 11:46:06 crc kubenswrapper[4926]: I1122 11:46:06.457239 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-pb2r5_493e392b-c61f-4115-abdf-42a9c2febe81/kube-rbac-proxy/0.log" Nov 22 11:46:06 crc kubenswrapper[4926]: I1122 11:46:06.473805 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-pb2r5_493e392b-c61f-4115-abdf-42a9c2febe81/kube-rbac-proxy-frr/0.log" Nov 22 11:46:06 crc kubenswrapper[4926]: I1122 11:46:06.647172 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-pb2r5_493e392b-c61f-4115-abdf-42a9c2febe81/reloader/0.log" Nov 22 11:46:06 crc kubenswrapper[4926]: I1122 11:46:06.738787 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-webhook-server-6998585d5-ndwkc_e8452d37-6eed-427b-9741-bda6aea54331/frr-k8s-webhook-server/0.log" Nov 22 11:46:06 crc kubenswrapper[4926]: I1122 11:46:06.948517 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_metallb-operator-controller-manager-7d858964b4-hd89m_afb6b154-40e5-4285-9f49-38053bdbb6c4/manager/0.log" Nov 22 11:46:07 crc kubenswrapper[4926]: I1122 11:46:07.133619 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_speaker-xg4ns_1302de5c-2784-4974-b2ac-3572fc73e1d9/kube-rbac-proxy/0.log" Nov 22 11:46:07 crc kubenswrapper[4926]: I1122 11:46:07.134646 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_metallb-operator-webhook-server-676845568d-nb86k_63e553c4-290f-4b65-a563-b57f0577c982/webhook-server/0.log" Nov 22 11:46:07 crc kubenswrapper[4926]: I1122 11:46:07.655808 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-pb2r5_493e392b-c61f-4115-abdf-42a9c2febe81/frr/0.log" Nov 22 11:46:07 crc kubenswrapper[4926]: I1122 11:46:07.688104 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_speaker-xg4ns_1302de5c-2784-4974-b2ac-3572fc73e1d9/speaker/0.log" Nov 22 11:46:18 crc kubenswrapper[4926]: I1122 11:46:18.582482 4926 scope.go:117] "RemoveContainer" 
containerID="23214392718d6cb9dbc33ab7d4275deb5093396ce41b71fae35e5d84eda8c403" Nov 22 11:46:18 crc kubenswrapper[4926]: E1122 11:46:18.583292 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xr9nd_openshift-machine-config-operator(d4977b14-85c3-4141-9b15-1768f09e8d27)\"" pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" podUID="d4977b14-85c3-4141-9b15-1768f09e8d27" Nov 22 11:46:20 crc kubenswrapper[4926]: I1122 11:46:20.913300 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772enqqjf_a5213e13-fc34-444f-91d3-df6d09816a68/util/0.log" Nov 22 11:46:21 crc kubenswrapper[4926]: I1122 11:46:21.102854 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772enqqjf_a5213e13-fc34-444f-91d3-df6d09816a68/util/0.log" Nov 22 11:46:21 crc kubenswrapper[4926]: I1122 11:46:21.126322 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772enqqjf_a5213e13-fc34-444f-91d3-df6d09816a68/pull/0.log" Nov 22 11:46:21 crc kubenswrapper[4926]: I1122 11:46:21.165862 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772enqqjf_a5213e13-fc34-444f-91d3-df6d09816a68/pull/0.log" Nov 22 11:46:21 crc kubenswrapper[4926]: I1122 11:46:21.311795 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772enqqjf_a5213e13-fc34-444f-91d3-df6d09816a68/util/0.log" Nov 22 11:46:21 crc kubenswrapper[4926]: I1122 11:46:21.347327 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772enqqjf_a5213e13-fc34-444f-91d3-df6d09816a68/extract/0.log" Nov 22 11:46:21 crc kubenswrapper[4926]: I1122 11:46:21.350370 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772enqqjf_a5213e13-fc34-444f-91d3-df6d09816a68/pull/0.log" Nov 22 11:46:21 crc kubenswrapper[4926]: I1122 11:46:21.494121 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-wlbxq_800e7d7e-7580-482b-bf81-728557492bcd/extract-utilities/0.log" Nov 22 11:46:21 crc kubenswrapper[4926]: I1122 11:46:21.663301 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-wlbxq_800e7d7e-7580-482b-bf81-728557492bcd/extract-content/0.log" Nov 22 11:46:21 crc kubenswrapper[4926]: I1122 11:46:21.677398 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-wlbxq_800e7d7e-7580-482b-bf81-728557492bcd/extract-utilities/0.log" Nov 22 11:46:21 crc kubenswrapper[4926]: I1122 11:46:21.677644 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-wlbxq_800e7d7e-7580-482b-bf81-728557492bcd/extract-content/0.log" Nov 22 11:46:21 crc kubenswrapper[4926]: I1122 11:46:21.845625 4926 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-marketplace_certified-operators-wlbxq_800e7d7e-7580-482b-bf81-728557492bcd/extract-utilities/0.log" Nov 22 11:46:21 crc kubenswrapper[4926]: I1122 11:46:21.855958 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-wlbxq_800e7d7e-7580-482b-bf81-728557492bcd/extract-content/0.log" Nov 22 11:46:22 crc kubenswrapper[4926]: I1122 11:46:22.031842 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-bpmk4_09054c3e-6dab-449c-a04e-1ba78e281575/extract-utilities/0.log" Nov 22 11:46:22 crc kubenswrapper[4926]: I1122 11:46:22.251432 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-bpmk4_09054c3e-6dab-449c-a04e-1ba78e281575/extract-content/0.log" Nov 22 11:46:22 crc kubenswrapper[4926]: I1122 11:46:22.309024 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-bpmk4_09054c3e-6dab-449c-a04e-1ba78e281575/extract-utilities/0.log" Nov 22 11:46:22 crc kubenswrapper[4926]: I1122 11:46:22.371960 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-bpmk4_09054c3e-6dab-449c-a04e-1ba78e281575/extract-content/0.log" Nov 22 11:46:22 crc kubenswrapper[4926]: I1122 11:46:22.490914 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-wlbxq_800e7d7e-7580-482b-bf81-728557492bcd/registry-server/0.log" Nov 22 11:46:22 crc kubenswrapper[4926]: I1122 11:46:22.594914 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-bpmk4_09054c3e-6dab-449c-a04e-1ba78e281575/extract-utilities/0.log" Nov 22 11:46:22 crc kubenswrapper[4926]: I1122 11:46:22.616466 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-bpmk4_09054c3e-6dab-449c-a04e-1ba78e281575/extract-content/0.log" Nov 22 11:46:22 crc kubenswrapper[4926]: I1122 11:46:22.792312 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6lv2rq_de687d1f-94d7-4503-9599-7a43bff94909/util/0.log" Nov 22 11:46:23 crc kubenswrapper[4926]: I1122 11:46:23.067940 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-bpmk4_09054c3e-6dab-449c-a04e-1ba78e281575/registry-server/0.log" Nov 22 11:46:23 crc kubenswrapper[4926]: I1122 11:46:23.095087 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6lv2rq_de687d1f-94d7-4503-9599-7a43bff94909/util/0.log" Nov 22 11:46:23 crc kubenswrapper[4926]: I1122 11:46:23.132294 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6lv2rq_de687d1f-94d7-4503-9599-7a43bff94909/pull/0.log" Nov 22 11:46:23 crc kubenswrapper[4926]: I1122 11:46:23.147968 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6lv2rq_de687d1f-94d7-4503-9599-7a43bff94909/pull/0.log" Nov 22 11:46:23 crc kubenswrapper[4926]: I1122 11:46:23.312031 4926 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6lv2rq_de687d1f-94d7-4503-9599-7a43bff94909/util/0.log" Nov 22 11:46:23 crc kubenswrapper[4926]: I1122 11:46:23.312607 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6lv2rq_de687d1f-94d7-4503-9599-7a43bff94909/pull/0.log" Nov 22 11:46:23 crc kubenswrapper[4926]: I1122 11:46:23.335536 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6lv2rq_de687d1f-94d7-4503-9599-7a43bff94909/extract/0.log" Nov 22 11:46:23 crc kubenswrapper[4926]: I1122 11:46:23.484527 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_marketplace-operator-79b997595-b7gxf_b71cda98-e97f-4b9c-93d9-74c8cabe6420/marketplace-operator/0.log" Nov 22 11:46:23 crc kubenswrapper[4926]: I1122 11:46:23.503265 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-szcbp_1d1eac02-20b5-4cfc-add3-a5f9d687455b/extract-utilities/0.log" Nov 22 11:46:24 crc kubenswrapper[4926]: I1122 11:46:24.564967 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-szcbp_1d1eac02-20b5-4cfc-add3-a5f9d687455b/extract-utilities/0.log" Nov 22 11:46:24 crc kubenswrapper[4926]: I1122 11:46:24.570749 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-szcbp_1d1eac02-20b5-4cfc-add3-a5f9d687455b/extract-content/0.log" Nov 22 11:46:24 crc kubenswrapper[4926]: I1122 11:46:24.570983 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-szcbp_1d1eac02-20b5-4cfc-add3-a5f9d687455b/extract-content/0.log" Nov 22 11:46:24 crc kubenswrapper[4926]: I1122 11:46:24.737505 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-szcbp_1d1eac02-20b5-4cfc-add3-a5f9d687455b/extract-utilities/0.log" Nov 22 11:46:24 crc kubenswrapper[4926]: I1122 11:46:24.775994 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-szcbp_1d1eac02-20b5-4cfc-add3-a5f9d687455b/extract-content/0.log" Nov 22 11:46:24 crc kubenswrapper[4926]: I1122 11:46:24.837476 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-v4wgk_17936bf0-470e-4bc3-aa1d-28727f066d93/extract-utilities/0.log" Nov 22 11:46:24 crc kubenswrapper[4926]: I1122 11:46:24.937097 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-szcbp_1d1eac02-20b5-4cfc-add3-a5f9d687455b/registry-server/0.log" Nov 22 11:46:24 crc kubenswrapper[4926]: I1122 11:46:24.987427 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-v4wgk_17936bf0-470e-4bc3-aa1d-28727f066d93/extract-content/0.log" Nov 22 11:46:25 crc kubenswrapper[4926]: I1122 11:46:25.015546 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-v4wgk_17936bf0-470e-4bc3-aa1d-28727f066d93/extract-utilities/0.log" Nov 22 11:46:25 crc kubenswrapper[4926]: I1122 11:46:25.035207 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-v4wgk_17936bf0-470e-4bc3-aa1d-28727f066d93/extract-content/0.log" Nov 22 11:46:25 crc kubenswrapper[4926]: 
I1122 11:46:25.174423 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-v4wgk_17936bf0-470e-4bc3-aa1d-28727f066d93/extract-content/0.log" Nov 22 11:46:25 crc kubenswrapper[4926]: I1122 11:46:25.204299 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-v4wgk_17936bf0-470e-4bc3-aa1d-28727f066d93/extract-utilities/0.log" Nov 22 11:46:25 crc kubenswrapper[4926]: I1122 11:46:25.701749 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-v4wgk_17936bf0-470e-4bc3-aa1d-28727f066d93/registry-server/0.log" Nov 22 11:46:33 crc kubenswrapper[4926]: I1122 11:46:33.582541 4926 scope.go:117] "RemoveContainer" containerID="23214392718d6cb9dbc33ab7d4275deb5093396ce41b71fae35e5d84eda8c403" Nov 22 11:46:33 crc kubenswrapper[4926]: E1122 11:46:33.583607 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xr9nd_openshift-machine-config-operator(d4977b14-85c3-4141-9b15-1768f09e8d27)\"" pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" podUID="d4977b14-85c3-4141-9b15-1768f09e8d27" Nov 22 11:46:48 crc kubenswrapper[4926]: I1122 11:46:48.581929 4926 scope.go:117] "RemoveContainer" containerID="23214392718d6cb9dbc33ab7d4275deb5093396ce41b71fae35e5d84eda8c403" Nov 22 11:46:48 crc kubenswrapper[4926]: E1122 11:46:48.582662 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xr9nd_openshift-machine-config-operator(d4977b14-85c3-4141-9b15-1768f09e8d27)\"" pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" podUID="d4977b14-85c3-4141-9b15-1768f09e8d27" Nov 22 11:46:48 crc kubenswrapper[4926]: E1122 11:46:48.770286 4926 upgradeaware.go:427] Error proxying data from client to backend: readfrom tcp 38.102.83.248:60946->38.102.83.248:35555: write tcp 38.102.83.248:60946->38.102.83.248:35555: write: broken pipe Nov 22 11:47:03 crc kubenswrapper[4926]: I1122 11:47:03.586834 4926 scope.go:117] "RemoveContainer" containerID="23214392718d6cb9dbc33ab7d4275deb5093396ce41b71fae35e5d84eda8c403" Nov 22 11:47:03 crc kubenswrapper[4926]: E1122 11:47:03.587790 4926 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-xr9nd_openshift-machine-config-operator(d4977b14-85c3-4141-9b15-1768f09e8d27)\"" pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" podUID="d4977b14-85c3-4141-9b15-1768f09e8d27" Nov 22 11:47:15 crc kubenswrapper[4926]: I1122 11:47:15.582518 4926 scope.go:117] "RemoveContainer" containerID="23214392718d6cb9dbc33ab7d4275deb5093396ce41b71fae35e5d84eda8c403" Nov 22 11:47:16 crc kubenswrapper[4926]: I1122 11:47:16.285179 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" event={"ID":"d4977b14-85c3-4141-9b15-1768f09e8d27","Type":"ContainerStarted","Data":"93b589e51e1eb7194139c18e30c2a8be0921561965463b3b40949403738ea9f9"} Nov 22 11:47:22 crc kubenswrapper[4926]: I1122 11:47:22.484325 4926 kubelet.go:2421] "SyncLoop ADD" 
source="api" pods=["openshift-marketplace/certified-operators-gw7fl"] Nov 22 11:47:22 crc kubenswrapper[4926]: E1122 11:47:22.485336 4926 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="18b96fb2-d21b-4f30-b1f6-b64204879976" containerName="collect-profiles" Nov 22 11:47:22 crc kubenswrapper[4926]: I1122 11:47:22.485354 4926 state_mem.go:107] "Deleted CPUSet assignment" podUID="18b96fb2-d21b-4f30-b1f6-b64204879976" containerName="collect-profiles" Nov 22 11:47:22 crc kubenswrapper[4926]: I1122 11:47:22.485612 4926 memory_manager.go:354] "RemoveStaleState removing state" podUID="18b96fb2-d21b-4f30-b1f6-b64204879976" containerName="collect-profiles" Nov 22 11:47:22 crc kubenswrapper[4926]: I1122 11:47:22.487301 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-gw7fl" Nov 22 11:47:22 crc kubenswrapper[4926]: I1122 11:47:22.498063 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-gw7fl"] Nov 22 11:47:22 crc kubenswrapper[4926]: I1122 11:47:22.537466 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/62f04381-a43f-4063-8fba-02eb4d4ecf14-catalog-content\") pod \"certified-operators-gw7fl\" (UID: \"62f04381-a43f-4063-8fba-02eb4d4ecf14\") " pod="openshift-marketplace/certified-operators-gw7fl" Nov 22 11:47:22 crc kubenswrapper[4926]: I1122 11:47:22.537605 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gbpms\" (UniqueName: \"kubernetes.io/projected/62f04381-a43f-4063-8fba-02eb4d4ecf14-kube-api-access-gbpms\") pod \"certified-operators-gw7fl\" (UID: \"62f04381-a43f-4063-8fba-02eb4d4ecf14\") " pod="openshift-marketplace/certified-operators-gw7fl" Nov 22 11:47:22 crc kubenswrapper[4926]: I1122 11:47:22.537671 4926 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/62f04381-a43f-4063-8fba-02eb4d4ecf14-utilities\") pod \"certified-operators-gw7fl\" (UID: \"62f04381-a43f-4063-8fba-02eb4d4ecf14\") " pod="openshift-marketplace/certified-operators-gw7fl" Nov 22 11:47:22 crc kubenswrapper[4926]: I1122 11:47:22.639481 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gbpms\" (UniqueName: \"kubernetes.io/projected/62f04381-a43f-4063-8fba-02eb4d4ecf14-kube-api-access-gbpms\") pod \"certified-operators-gw7fl\" (UID: \"62f04381-a43f-4063-8fba-02eb4d4ecf14\") " pod="openshift-marketplace/certified-operators-gw7fl" Nov 22 11:47:22 crc kubenswrapper[4926]: I1122 11:47:22.639560 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/62f04381-a43f-4063-8fba-02eb4d4ecf14-utilities\") pod \"certified-operators-gw7fl\" (UID: \"62f04381-a43f-4063-8fba-02eb4d4ecf14\") " pod="openshift-marketplace/certified-operators-gw7fl" Nov 22 11:47:22 crc kubenswrapper[4926]: I1122 11:47:22.639695 4926 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/62f04381-a43f-4063-8fba-02eb4d4ecf14-catalog-content\") pod \"certified-operators-gw7fl\" (UID: \"62f04381-a43f-4063-8fba-02eb4d4ecf14\") " pod="openshift-marketplace/certified-operators-gw7fl" Nov 22 11:47:22 crc kubenswrapper[4926]: I1122 11:47:22.640144 4926 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/62f04381-a43f-4063-8fba-02eb4d4ecf14-utilities\") pod \"certified-operators-gw7fl\" (UID: \"62f04381-a43f-4063-8fba-02eb4d4ecf14\") " pod="openshift-marketplace/certified-operators-gw7fl" Nov 22 11:47:22 crc kubenswrapper[4926]: I1122 11:47:22.640197 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/62f04381-a43f-4063-8fba-02eb4d4ecf14-catalog-content\") pod \"certified-operators-gw7fl\" (UID: \"62f04381-a43f-4063-8fba-02eb4d4ecf14\") " pod="openshift-marketplace/certified-operators-gw7fl" Nov 22 11:47:22 crc kubenswrapper[4926]: I1122 11:47:22.661215 4926 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gbpms\" (UniqueName: \"kubernetes.io/projected/62f04381-a43f-4063-8fba-02eb4d4ecf14-kube-api-access-gbpms\") pod \"certified-operators-gw7fl\" (UID: \"62f04381-a43f-4063-8fba-02eb4d4ecf14\") " pod="openshift-marketplace/certified-operators-gw7fl" Nov 22 11:47:22 crc kubenswrapper[4926]: I1122 11:47:22.818168 4926 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-gw7fl" Nov 22 11:47:23 crc kubenswrapper[4926]: I1122 11:47:23.180405 4926 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-gw7fl"] Nov 22 11:47:23 crc kubenswrapper[4926]: I1122 11:47:23.376385 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-gw7fl" event={"ID":"62f04381-a43f-4063-8fba-02eb4d4ecf14","Type":"ContainerStarted","Data":"ed07189f70fda2e06c3380405faa8d4c6907ed646a77f6e95211969ee4e0d569"} Nov 22 11:47:24 crc kubenswrapper[4926]: I1122 11:47:24.393870 4926 generic.go:334] "Generic (PLEG): container finished" podID="62f04381-a43f-4063-8fba-02eb4d4ecf14" containerID="c7261fcf67a9626fcc9dfde698035d39cecc5659a315509139384c711cc4fc25" exitCode=0 Nov 22 11:47:24 crc kubenswrapper[4926]: I1122 11:47:24.393960 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-gw7fl" event={"ID":"62f04381-a43f-4063-8fba-02eb4d4ecf14","Type":"ContainerDied","Data":"c7261fcf67a9626fcc9dfde698035d39cecc5659a315509139384c711cc4fc25"} Nov 22 11:47:24 crc kubenswrapper[4926]: I1122 11:47:24.397454 4926 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 22 11:47:25 crc kubenswrapper[4926]: I1122 11:47:25.404108 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-gw7fl" event={"ID":"62f04381-a43f-4063-8fba-02eb4d4ecf14","Type":"ContainerStarted","Data":"5471c76fbc42513004df494b605abd4f8f471bad773861e61b3d7bf34bc0db97"} Nov 22 11:47:26 crc kubenswrapper[4926]: I1122 11:47:26.420278 4926 generic.go:334] "Generic (PLEG): container finished" podID="62f04381-a43f-4063-8fba-02eb4d4ecf14" containerID="5471c76fbc42513004df494b605abd4f8f471bad773861e61b3d7bf34bc0db97" exitCode=0 Nov 22 11:47:26 crc kubenswrapper[4926]: I1122 11:47:26.420413 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-gw7fl" event={"ID":"62f04381-a43f-4063-8fba-02eb4d4ecf14","Type":"ContainerDied","Data":"5471c76fbc42513004df494b605abd4f8f471bad773861e61b3d7bf34bc0db97"} Nov 22 11:47:27 crc kubenswrapper[4926]: I1122 11:47:27.433233 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-marketplace/certified-operators-gw7fl" event={"ID":"62f04381-a43f-4063-8fba-02eb4d4ecf14","Type":"ContainerStarted","Data":"9fad15456fe1d5a14f4aec40118e50f430f742e7b52d3661b6693982c3f5c01a"} Nov 22 11:47:27 crc kubenswrapper[4926]: I1122 11:47:27.453697 4926 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-gw7fl" podStartSLOduration=2.981060564 podStartE2EDuration="5.453678643s" podCreationTimestamp="2025-11-22 11:47:22 +0000 UTC" firstStartedPulling="2025-11-22 11:47:24.397114965 +0000 UTC m=+4064.698720262" lastFinishedPulling="2025-11-22 11:47:26.869733044 +0000 UTC m=+4067.171338341" observedRunningTime="2025-11-22 11:47:27.451251524 +0000 UTC m=+4067.752856851" watchObservedRunningTime="2025-11-22 11:47:27.453678643 +0000 UTC m=+4067.755283930" Nov 22 11:47:32 crc kubenswrapper[4926]: I1122 11:47:32.819362 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-gw7fl" Nov 22 11:47:32 crc kubenswrapper[4926]: I1122 11:47:32.820067 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-gw7fl" Nov 22 11:47:32 crc kubenswrapper[4926]: I1122 11:47:32.958495 4926 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-gw7fl" Nov 22 11:47:33 crc kubenswrapper[4926]: I1122 11:47:33.568488 4926 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-gw7fl" Nov 22 11:47:33 crc kubenswrapper[4926]: I1122 11:47:33.638117 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-gw7fl"] Nov 22 11:47:35 crc kubenswrapper[4926]: I1122 11:47:35.514976 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-gw7fl" podUID="62f04381-a43f-4063-8fba-02eb4d4ecf14" containerName="registry-server" containerID="cri-o://9fad15456fe1d5a14f4aec40118e50f430f742e7b52d3661b6693982c3f5c01a" gracePeriod=2 Nov 22 11:47:36 crc kubenswrapper[4926]: I1122 11:47:36.529919 4926 generic.go:334] "Generic (PLEG): container finished" podID="62f04381-a43f-4063-8fba-02eb4d4ecf14" containerID="9fad15456fe1d5a14f4aec40118e50f430f742e7b52d3661b6693982c3f5c01a" exitCode=0 Nov 22 11:47:36 crc kubenswrapper[4926]: I1122 11:47:36.529935 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-gw7fl" event={"ID":"62f04381-a43f-4063-8fba-02eb4d4ecf14","Type":"ContainerDied","Data":"9fad15456fe1d5a14f4aec40118e50f430f742e7b52d3661b6693982c3f5c01a"} Nov 22 11:47:36 crc kubenswrapper[4926]: I1122 11:47:36.530215 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-gw7fl" event={"ID":"62f04381-a43f-4063-8fba-02eb4d4ecf14","Type":"ContainerDied","Data":"ed07189f70fda2e06c3380405faa8d4c6907ed646a77f6e95211969ee4e0d569"} Nov 22 11:47:36 crc kubenswrapper[4926]: I1122 11:47:36.530235 4926 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ed07189f70fda2e06c3380405faa8d4c6907ed646a77f6e95211969ee4e0d569" Nov 22 11:47:36 crc kubenswrapper[4926]: I1122 11:47:36.667945 4926 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-gw7fl" Nov 22 11:47:36 crc kubenswrapper[4926]: I1122 11:47:36.731765 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/62f04381-a43f-4063-8fba-02eb4d4ecf14-utilities\") pod \"62f04381-a43f-4063-8fba-02eb4d4ecf14\" (UID: \"62f04381-a43f-4063-8fba-02eb4d4ecf14\") " Nov 22 11:47:36 crc kubenswrapper[4926]: I1122 11:47:36.731874 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gbpms\" (UniqueName: \"kubernetes.io/projected/62f04381-a43f-4063-8fba-02eb4d4ecf14-kube-api-access-gbpms\") pod \"62f04381-a43f-4063-8fba-02eb4d4ecf14\" (UID: \"62f04381-a43f-4063-8fba-02eb4d4ecf14\") " Nov 22 11:47:36 crc kubenswrapper[4926]: I1122 11:47:36.732059 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/62f04381-a43f-4063-8fba-02eb4d4ecf14-catalog-content\") pod \"62f04381-a43f-4063-8fba-02eb4d4ecf14\" (UID: \"62f04381-a43f-4063-8fba-02eb4d4ecf14\") " Nov 22 11:47:36 crc kubenswrapper[4926]: I1122 11:47:36.740501 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/62f04381-a43f-4063-8fba-02eb4d4ecf14-utilities" (OuterVolumeSpecName: "utilities") pod "62f04381-a43f-4063-8fba-02eb4d4ecf14" (UID: "62f04381-a43f-4063-8fba-02eb4d4ecf14"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 11:47:36 crc kubenswrapper[4926]: I1122 11:47:36.741429 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/62f04381-a43f-4063-8fba-02eb4d4ecf14-kube-api-access-gbpms" (OuterVolumeSpecName: "kube-api-access-gbpms") pod "62f04381-a43f-4063-8fba-02eb4d4ecf14" (UID: "62f04381-a43f-4063-8fba-02eb4d4ecf14"). InnerVolumeSpecName "kube-api-access-gbpms". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 11:47:36 crc kubenswrapper[4926]: I1122 11:47:36.779785 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/62f04381-a43f-4063-8fba-02eb4d4ecf14-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "62f04381-a43f-4063-8fba-02eb4d4ecf14" (UID: "62f04381-a43f-4063-8fba-02eb4d4ecf14"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 11:47:36 crc kubenswrapper[4926]: I1122 11:47:36.834405 4926 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/62f04381-a43f-4063-8fba-02eb4d4ecf14-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 22 11:47:36 crc kubenswrapper[4926]: I1122 11:47:36.834630 4926 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/62f04381-a43f-4063-8fba-02eb4d4ecf14-utilities\") on node \"crc\" DevicePath \"\"" Nov 22 11:47:36 crc kubenswrapper[4926]: I1122 11:47:36.834748 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gbpms\" (UniqueName: \"kubernetes.io/projected/62f04381-a43f-4063-8fba-02eb4d4ecf14-kube-api-access-gbpms\") on node \"crc\" DevicePath \"\"" Nov 22 11:47:37 crc kubenswrapper[4926]: I1122 11:47:37.543466 4926 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-gw7fl" Nov 22 11:47:37 crc kubenswrapper[4926]: I1122 11:47:37.605091 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-gw7fl"] Nov 22 11:47:37 crc kubenswrapper[4926]: I1122 11:47:37.631563 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-gw7fl"] Nov 22 11:47:38 crc kubenswrapper[4926]: I1122 11:47:38.601681 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="62f04381-a43f-4063-8fba-02eb4d4ecf14" path="/var/lib/kubelet/pods/62f04381-a43f-4063-8fba-02eb4d4ecf14/volumes" Nov 22 11:48:03 crc kubenswrapper[4926]: I1122 11:48:03.860415 4926 generic.go:334] "Generic (PLEG): container finished" podID="ffeb1cff-f4a8-416f-a082-a07741f82636" containerID="acaa2d716453e0542967a493ff8bdaf59a8e4c6697c28cf1059bfe70c3ab5920" exitCode=0 Nov 22 11:48:03 crc kubenswrapper[4926]: I1122 11:48:03.860524 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-kmzpd/must-gather-8dmck" event={"ID":"ffeb1cff-f4a8-416f-a082-a07741f82636","Type":"ContainerDied","Data":"acaa2d716453e0542967a493ff8bdaf59a8e4c6697c28cf1059bfe70c3ab5920"} Nov 22 11:48:03 crc kubenswrapper[4926]: I1122 11:48:03.862258 4926 scope.go:117] "RemoveContainer" containerID="acaa2d716453e0542967a493ff8bdaf59a8e4c6697c28cf1059bfe70c3ab5920" Nov 22 11:48:04 crc kubenswrapper[4926]: I1122 11:48:04.817101 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-kmzpd_must-gather-8dmck_ffeb1cff-f4a8-416f-a082-a07741f82636/gather/0.log" Nov 22 11:48:15 crc kubenswrapper[4926]: I1122 11:48:15.607348 4926 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-kmzpd/must-gather-8dmck"] Nov 22 11:48:15 crc kubenswrapper[4926]: I1122 11:48:15.608147 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-must-gather-kmzpd/must-gather-8dmck" podUID="ffeb1cff-f4a8-416f-a082-a07741f82636" containerName="copy" containerID="cri-o://3edf8cd06d0cfedaed79f85abef6829ceb95889e06a9d2cc08860f893f7db8b4" gracePeriod=2 Nov 22 11:48:15 crc kubenswrapper[4926]: I1122 11:48:15.614386 4926 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-kmzpd/must-gather-8dmck"] Nov 22 11:48:16 crc kubenswrapper[4926]: I1122 11:48:16.016678 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-kmzpd_must-gather-8dmck_ffeb1cff-f4a8-416f-a082-a07741f82636/copy/0.log" Nov 22 11:48:16 crc kubenswrapper[4926]: I1122 11:48:16.017790 4926 generic.go:334] "Generic (PLEG): container finished" podID="ffeb1cff-f4a8-416f-a082-a07741f82636" containerID="3edf8cd06d0cfedaed79f85abef6829ceb95889e06a9d2cc08860f893f7db8b4" exitCode=143 Nov 22 11:48:16 crc kubenswrapper[4926]: I1122 11:48:16.017990 4926 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="aa2f8bc084cab92c53d56d5dd5af609a233a93420b44c67020bd84825cc1602b" Nov 22 11:48:16 crc kubenswrapper[4926]: I1122 11:48:16.085923 4926 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-kmzpd_must-gather-8dmck_ffeb1cff-f4a8-416f-a082-a07741f82636/copy/0.log" Nov 22 11:48:16 crc kubenswrapper[4926]: I1122 11:48:16.086220 4926 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-kmzpd/must-gather-8dmck" Nov 22 11:48:16 crc kubenswrapper[4926]: I1122 11:48:16.211036 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-m8gbq\" (UniqueName: \"kubernetes.io/projected/ffeb1cff-f4a8-416f-a082-a07741f82636-kube-api-access-m8gbq\") pod \"ffeb1cff-f4a8-416f-a082-a07741f82636\" (UID: \"ffeb1cff-f4a8-416f-a082-a07741f82636\") " Nov 22 11:48:16 crc kubenswrapper[4926]: I1122 11:48:16.211297 4926 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/ffeb1cff-f4a8-416f-a082-a07741f82636-must-gather-output\") pod \"ffeb1cff-f4a8-416f-a082-a07741f82636\" (UID: \"ffeb1cff-f4a8-416f-a082-a07741f82636\") " Nov 22 11:48:16 crc kubenswrapper[4926]: I1122 11:48:16.217072 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ffeb1cff-f4a8-416f-a082-a07741f82636-kube-api-access-m8gbq" (OuterVolumeSpecName: "kube-api-access-m8gbq") pod "ffeb1cff-f4a8-416f-a082-a07741f82636" (UID: "ffeb1cff-f4a8-416f-a082-a07741f82636"). InnerVolumeSpecName "kube-api-access-m8gbq". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 11:48:16 crc kubenswrapper[4926]: I1122 11:48:16.314159 4926 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-m8gbq\" (UniqueName: \"kubernetes.io/projected/ffeb1cff-f4a8-416f-a082-a07741f82636-kube-api-access-m8gbq\") on node \"crc\" DevicePath \"\"" Nov 22 11:48:16 crc kubenswrapper[4926]: I1122 11:48:16.340597 4926 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ffeb1cff-f4a8-416f-a082-a07741f82636-must-gather-output" (OuterVolumeSpecName: "must-gather-output") pod "ffeb1cff-f4a8-416f-a082-a07741f82636" (UID: "ffeb1cff-f4a8-416f-a082-a07741f82636"). InnerVolumeSpecName "must-gather-output". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 11:48:16 crc kubenswrapper[4926]: I1122 11:48:16.417027 4926 reconciler_common.go:293] "Volume detached for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/ffeb1cff-f4a8-416f-a082-a07741f82636-must-gather-output\") on node \"crc\" DevicePath \"\"" Nov 22 11:48:16 crc kubenswrapper[4926]: I1122 11:48:16.593798 4926 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ffeb1cff-f4a8-416f-a082-a07741f82636" path="/var/lib/kubelet/pods/ffeb1cff-f4a8-416f-a082-a07741f82636/volumes" Nov 22 11:48:17 crc kubenswrapper[4926]: I1122 11:48:17.028260 4926 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-kmzpd/must-gather-8dmck" Nov 22 11:49:23 crc kubenswrapper[4926]: I1122 11:49:23.330354 4926 scope.go:117] "RemoveContainer" containerID="acaa2d716453e0542967a493ff8bdaf59a8e4c6697c28cf1059bfe70c3ab5920" Nov 22 11:49:23 crc kubenswrapper[4926]: I1122 11:49:23.410473 4926 scope.go:117] "RemoveContainer" containerID="3edf8cd06d0cfedaed79f85abef6829ceb95889e06a9d2cc08860f893f7db8b4" Nov 22 11:49:39 crc kubenswrapper[4926]: I1122 11:49:39.661642 4926 patch_prober.go:28] interesting pod/machine-config-daemon-xr9nd container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 22 11:49:39 crc kubenswrapper[4926]: I1122 11:49:39.662316 4926 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" podUID="d4977b14-85c3-4141-9b15-1768f09e8d27" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 22 11:50:09 crc kubenswrapper[4926]: I1122 11:50:09.661148 4926 patch_prober.go:28] interesting pod/machine-config-daemon-xr9nd container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 22 11:50:09 crc kubenswrapper[4926]: I1122 11:50:09.661948 4926 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" podUID="d4977b14-85c3-4141-9b15-1768f09e8d27" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 22 11:50:39 crc kubenswrapper[4926]: I1122 11:50:39.661602 4926 patch_prober.go:28] interesting pod/machine-config-daemon-xr9nd container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 22 11:50:39 crc kubenswrapper[4926]: I1122 11:50:39.662218 4926 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" podUID="d4977b14-85c3-4141-9b15-1768f09e8d27" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 22 11:50:39 crc kubenswrapper[4926]: I1122 11:50:39.662269 4926 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" Nov 22 11:50:39 crc kubenswrapper[4926]: I1122 11:50:39.663192 4926 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"93b589e51e1eb7194139c18e30c2a8be0921561965463b3b40949403738ea9f9"} pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 22 11:50:39 crc kubenswrapper[4926]: I1122 11:50:39.663268 4926 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" 
podUID="d4977b14-85c3-4141-9b15-1768f09e8d27" containerName="machine-config-daemon" containerID="cri-o://93b589e51e1eb7194139c18e30c2a8be0921561965463b3b40949403738ea9f9" gracePeriod=600 Nov 22 11:50:40 crc kubenswrapper[4926]: I1122 11:50:40.672344 4926 generic.go:334] "Generic (PLEG): container finished" podID="d4977b14-85c3-4141-9b15-1768f09e8d27" containerID="93b589e51e1eb7194139c18e30c2a8be0921561965463b3b40949403738ea9f9" exitCode=0 Nov 22 11:50:40 crc kubenswrapper[4926]: I1122 11:50:40.672430 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" event={"ID":"d4977b14-85c3-4141-9b15-1768f09e8d27","Type":"ContainerDied","Data":"93b589e51e1eb7194139c18e30c2a8be0921561965463b3b40949403738ea9f9"} Nov 22 11:50:40 crc kubenswrapper[4926]: I1122 11:50:40.672711 4926 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-xr9nd" event={"ID":"d4977b14-85c3-4141-9b15-1768f09e8d27","Type":"ContainerStarted","Data":"99dd8916f8071be0d12a37b52289d3ecc15e4762d8a14ccc38b623955d9e748f"} Nov 22 11:50:40 crc kubenswrapper[4926]: I1122 11:50:40.672736 4926 scope.go:117] "RemoveContainer" containerID="23214392718d6cb9dbc33ab7d4275deb5093396ce41b71fae35e5d84eda8c403" var/home/core/zuul-output/logs/crc-cloud-workdir-crc-all-logs.tar.gz0000644000175000000000000000005515110321724024441 0ustar coreroot‹íÁ  ÷Om7 €7šÞ'(var/home/core/zuul-output/logs/crc-cloud/0000755000175000000000000000000015110321724017356 5ustar corerootvar/home/core/zuul-output/artifacts/0000755000175000017500000000000015110311041016470 5ustar corecorevar/home/core/zuul-output/docs/0000755000175000017500000000000015110311041015440 5ustar corecore